Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 14
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 26
-rw-r--r--  drivers/net/bonding/bond_main.c | 55
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 50
-rw-r--r--  drivers/net/bonding/bond_options.c | 91
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 8
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 46
-rw-r--r--  drivers/net/can/flexcan.c | 53
-rw-r--r--  drivers/net/can/janz-ican3.c | 125
-rw-r--r--  drivers/net/dsa/Kconfig | 12
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 7
-rw-r--r--  drivers/net/dsa/mv88e6123_61_65.c | 186
-rw-r--r--  drivers/net/dsa/mv88e6131.c | 185
-rw-r--r--  drivers/net/dsa/mv88e6171.c | 234
-rw-r--r--  drivers/net/dsa/mv88e6352.c | 188
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 640
-rw-r--r--  drivers/net/dsa/mv88e6xxx.h | 99
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 155
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 17
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 35
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 99
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 341
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 79
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 384
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 1332
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 236
-rw-r--r--  drivers/net/ethernet/apm/xgene/Makefile | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 36
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 8
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 207
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 30
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c | 200
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h | 49
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 62
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 20
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 170
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 59
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 31
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 67
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 53
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 9
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 40
-rw-r--r--  drivers/net/ethernet/cavium/Makefile | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/Makefile | 11
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 422
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 932
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h | 213
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 600
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 1331
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 1545
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 381
-rw-r--r--  drivers/net/ethernet/cavium/thunder/q_struct.h | 701
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 966
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 220
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 215
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 238
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 294
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 489
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 382
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2114
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 36
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 86
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 121
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 72
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 123
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 36
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_clsf.c | 31
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 10
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 55
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 56
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 50
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 14
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 500
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 78
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 53
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 1
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 41
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 5
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 29
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 26
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 205
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 39
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 146
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 160
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 44
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 106
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 400
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 124
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 520
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 679
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c | 860
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1899
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 249
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 344
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 107
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/flow_table.c | 422
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 146
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 343
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mcg.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 232
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c | 444
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 325
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.h | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 345
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 183
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 171
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 4
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 1
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 77
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 46
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 1579
-rw-r--r--  drivers/net/ethernet/rocker/rocker.h | 28
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 1147
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c | 783
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.h | 69
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 316
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 15
-rw-r--r--  drivers/net/ethernet/sfc/enum.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 33
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 64
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 228
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 16
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 434
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 13
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 34
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 251
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 40
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 27
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c | 156
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.h | 79
-rw-r--r--  drivers/net/ethernet/sfc/sriov.c | 83
-rw-r--r--  drivers/net/ethernet/sfc/sriov.h | 31
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 90
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 41
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 365
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 99
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 27
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 64
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 129
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h | 9
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 9
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 45
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 2
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 3
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c | 3
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 3
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 249
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 16
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 108
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 288
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 30
-rw-r--r--  drivers/net/fddi/skfp/srf.c | 2
-rw-r--r--  drivers/net/geneve.c | 523
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 10
-rw-r--r--  drivers/net/hyperv/netvsc.c | 54
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 97
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 16
-rw-r--r--  drivers/net/ieee802154/Kconfig | 10
-rw-r--r--  drivers/net/ieee802154/Makefile | 1
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 376
-rw-r--r--  drivers/net/ieee802154/at86rf230.h | 220
-rw-r--r--  drivers/net/ieee802154/atusb.c | 699
-rw-r--r--  drivers/net/ieee802154/atusb.h | 84
-rw-r--r--  drivers/net/ieee802154/cc2520.c | 2
-rw-r--r--  drivers/net/ieee802154/fakelb.c | 209
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c | 2
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 5
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 138
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 25
-rw-r--r--  drivers/net/irda/irda-usb.c | 4
-rw-r--r--  drivers/net/macvtap.c | 34
-rw-r--r--  drivers/net/phy/Kconfig | 13
-rw-r--r--  drivers/net/phy/Makefile | 2
-rw-r--r--  drivers/net/phy/amd-xgbe-phy.c | 1901
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 7
-rw-r--r--  drivers/net/phy/dp83867.c | 239
-rw-r--r--  drivers/net/phy/icplus.c | 5
-rw-r--r--  drivers/net/phy/marvell.c | 10
-rw-r--r--  drivers/net/phy/mdio-bitbang.c | 7
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 1
-rw-r--r--  drivers/net/phy/micrel.c | 53
-rw-r--r--  drivers/net/phy/phy.c | 34
-rw-r--r--  drivers/net/ppp/pppoe.c | 4
-rw-r--r--  drivers/net/ppp/pppox.c | 2
-rw-r--r--  drivers/net/ppp/pptp.c | 4
-rw-r--r--  drivers/net/team/team.c | 10
-rw-r--r--  drivers/net/tun.c | 26
-rw-r--r--  drivers/net/vxlan.c | 10
-rw-r--r--  drivers/net/wan/cosa.c | 2
-rw-r--r--  drivers/net/wan/dscc4.c | 9
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/Makefile | 2
-rw-r--r--  drivers/net/wireless/adm8211.c | 27
-rw-r--r--  drivers/net/wireless/at76c50x-usb.h | 2
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/Makefile | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 165
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 56
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 127
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 50
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.c | 98
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h | 132
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 208
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 34
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 53
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 2747
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.h | 29
-rw-r--r--  drivers/net/wireless/ath/ath10k/p2p.c | 156
-rw-r--r--  drivers/net/wireless/ath/ath10k/p2p.h | 28
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 356
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h | 95
-rw-r--r--  drivers/net/wireless/ath/ath10k/rx_desc.h | 22
-rw-r--r--  drivers/net/wireless/ath/ath10k/spectral.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/thermal.c | 134
-rw-r--r--  drivers/net/wireless/ath/ath10k/thermal.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath10k/trace.h | 22
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-ops.h | 194
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c | 579
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 168
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 276
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 230
-rw-r--r--  drivers/net/wireless/ath/ath10k/wow.c | 321
-rw-r--r--  drivers/net/wireless/ath/ath10k/wow.h | 40
-rw-r--r--  drivers/net/wireless/ath/ath5k/ath5k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath5k/led.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 155
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.c | 144
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-spectral.c | 740
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-spectral.h | 35
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/fw.c | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/led.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 7
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 5
-rw-r--r--  drivers/net/wireless/ath/dfs_pattern_detector.c | 72
-rw-r--r--  drivers/net/wireless/ath/dfs_pattern_detector.h | 4
-rw-r--r--  drivers/net/wireless/ath/dfs_pri_detector.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 128
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 107
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 31
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 5
-rw-r--r--  drivers/net/wireless/ath/wil6210/pcie_bus.c | 6
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.c | 375
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.h | 27
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 48
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.h | 24
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 24
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 70
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.h | 40
-rw-r--r--  drivers/net/wireless/b43/main.c | 8
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 8
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 36
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c | 206
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/chip.c | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/commonring.c | 18
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/feature.c | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/feature.h | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/firmware.c | 217
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/firmware.h | 6
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/flowring.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/flowring.h | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 20
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/of.c | 11
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/pcie.c | 184
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio.c | 11
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/usb.c | 9
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/include/brcm_hw_ids.h | 3
-rw-r--r--  drivers/net/wireless/cw1200/sta.c | 10
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-mac.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 25
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-8000.c | 71
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-config.h | 45
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 32
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw-file.h | 107
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw.h | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.c | 113
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 22
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 103
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 22
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 91
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 63
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 413
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 162
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/nvm.c | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 44
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 1418
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 33
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tt.c | 40
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 35
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 17
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 51
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 415
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 135
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 2
-rw-r--r--  drivers/net/wireless/libertas/cfg.c | 13
-rw-r--r--  drivers/net/wireless/libertas/cfg.h | 3
-rw-r--r--  drivers/net/wireless/libertas/cmd.h | 3
-rw-r--r--  drivers/net/wireless/libertas/cmdresp.c | 13
-rw-r--r--  drivers/net/wireless/libertas_tf/main.c | 7
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 55
-rw-r--r--  drivers/net/wireless/mediatek/Kconfig | 10
-rw-r--r--  drivers/net/wireless/mediatek/Makefile | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/Kconfig | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/Makefile | 9
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/core.c | 78
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/debugfs.c | 172
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/dma.c | 533
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/dma.h | 127
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/eeprom.c | 414
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/eeprom.h | 151
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/init.c | 625
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/initvals.h | 164
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/initvals_phy.h | 291
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/mac.c | 569
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/mac.h | 178
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/main.c | 412
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/mcu.c | 534
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/mcu.h | 94
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/mt7601u.h | 390
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/phy.c | 1251
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/regs.h | 636
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/trace.c | 21
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/trace.h | 400
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/tx.c | 319
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/usb.c | 360
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/usb.h | 77
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/util.c | 42
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/util.h | 77
-rw-r--r--  drivers/net/wireless/mwifiex/11h.c | 48
-rw-r--r--  drivers/net/wireless/mwifiex/11n.c | 24
-rw-r--r--  drivers/net/wireless/mwifiex/11n_aggr.c | 7
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.c | 51
-rw-r--r--  drivers/net/wireless/mwifiex/Kconfig | 2
-rw-r--r--  drivers/net/wireless/mwifiex/README | 6
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 462
-rw-r--r--  drivers/net/wireless/mwifiex/cfp.c | 50
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 365
-rw-r--r--  drivers/net/wireless/mwifiex/debugfs.c | 165
-rw-r--r--  drivers/net/wireless/mwifiex/ethtool.c | 97
-rw-r--r--  drivers/net/wireless/mwifiex/fw.h | 9
-rw-r--r--  drivers/net/wireless/mwifiex/init.c | 54
-rw-r--r--  drivers/net/wireless/mwifiex/ioctl.h | 6
-rw-r--r--  drivers/net/wireless/mwifiex/join.c | 222
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 224
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 59
-rw-r--r--  drivers/net/wireless/mwifiex/pcie.c | 589
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 377
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 458
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmd.c | 159
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmdresp.c | 197
-rw-r--r--  drivers/net/wireless/mwifiex/sta_event.c | 132
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 147
-rw-r--r--  drivers/net/wireless/mwifiex/sta_rx.c | 13
-rw-r--r--  drivers/net/wireless/mwifiex/sta_tx.c | 18
-rw-r--r--  drivers/net/wireless/mwifiex/tdls.c | 86
-rw-r--r--  drivers/net/wireless/mwifiex/txrx.c | 30
-rw-r--r--  drivers/net/wireless/mwifiex/uap_cmd.c | 22
-rw-r--r--  drivers/net/wireless/mwifiex/uap_event.c | 44
-rw-r--r--  drivers/net/wireless/mwifiex/uap_txrx.c | 36
-rw-r--r--  drivers/net/wireless/mwifiex/usb.c | 141
-rw-r--r--  drivers/net/wireless/mwifiex/util.c | 34
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.c | 107
-rw-r--r--  drivers/net/wireless/mwl8k.c | 2
-rw-r--r--  drivers/net/wireless/p54/fwio.c | 3
-rw-r--r--  drivers/net/wireless/p54/led.c | 2
-rw-r--r--  drivers/net/wireless/p54/main.c | 6
-rw-r--r--  drivers/net/wireless/ray_cs.c | 2
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 19
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.h | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800soc.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/Kconfig | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/core.h | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/regd.c | 42
-rw-r--r--  drivers/net/wireless/rtlwifi/regd.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/hw.c | 20
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/mac.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ee/fw.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ee/hw.c | 21
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723ae/hw.c | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/fw.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/hw.c | 21
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/hw.c | 24
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 9
-rw-r--r--  drivers/net/wireless/ti/wl18xx/main.c | 71
-rw-r--r--  drivers/net/wireless/ti/wl18xx/reg.h | 1
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 32
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 4
-rw-r--r--  drivers/net/xen-netback/netback.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 5
523 files changed, 51405 insertions(+), 14099 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index df51d6025a90..019fceffc9e5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -179,6 +179,20 @@ config VXLAN
To compile this driver as a module, choose M here: the module
will be called vxlan.
+config GENEVE
+ tristate "Generic Network Virtualization Encapsulation netdev"
+ depends on INET && GENEVE_CORE
+ select NET_IP_TUNNEL
+ ---help---
+ This allows one to create geneve virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. GENEVE is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ For more information see:
+ http://tools.ietf.org/html/draft-gross-geneve-02
+
+ To compile this driver as a module, choose M here: the module
+ will be called geneve.
+
config NETCONSOLE
tristate "Network console logging support"
---help---
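For orientation, this is the on-wire header the new geneve module encapsulates with, per the draft cited in the help text above. The struct is a sketch only: the name is hypothetical and the two leading bitfield bytes are shown as plain octets, sidestepping the bit-order handling a real header needs.

/* Geneve header per draft-gross-geneve-02 (illustrative) */
struct geneve_hdr_sketch {
	__u8   ver_optlen;	/* 2-bit version, 6-bit option length in 4-byte words */
	__u8   flags;		/* OAM and Critical-options bits, rest reserved */
	__be16 proto_type;	/* encapsulated protocol, ETH_P_TEB for Layer 2 frames */
	__u8   vni[3];		/* 24-bit Virtual Network Identifier */
	__u8   rsvd;
	/* variable-length options follow */
};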
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e25fdd7d905e..c12cb22478a7 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_NLMON) += nlmon.o
#
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index fbd54f0e32e8..7fde4d5c2b28 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -75,10 +75,10 @@
/* Port Key definitions
* key is determined according to the link speed, duplex and
 * user key (which is not yet supported)
- * --------------------------------------------------------------
- * Port key :  | User key            |      Speed        | Duplex |
- * --------------------------------------------------------------
- * 16            6                     1                   0
+ * --------------------------------------------------------------
+ * Port key    | User key (10 bits)  | Speed (5 bits)    | Duplex|
+ * --------------------------------------------------------------
+ * |15                              6|5                 1|0
*/
#define AD_DUPLEX_KEY_MASKS 0x1
#define AD_SPEED_KEY_MASKS 0x3E
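A condensed view of how those masks combine into the 16-bit port key (an illustrative helper, not part of bond_3ad.c; it mirrors the assembly done in bond_3ad_bind_slave() later in this diff):

static u16 ad_port_key_sketch(u16 user_key, u16 speed, u16 duplex)
{
	/* user key in bits 15:6, speed in bits 5:1, duplex in bit 0 */
	return (user_key << 6) |
	       ((speed << 1) & AD_SPEED_KEY_MASKS) |
	       (duplex & AD_DUPLEX_KEY_MASKS);
}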
@@ -1908,8 +1908,14 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
BOND_AD_INFO(bond).aggregator_identifier = 0;
- BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
- BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
/* initialize how many times this module is called in one
* second (should be about every 100ms)
@@ -1945,10 +1951,10 @@ void bond_3ad_bind_slave(struct slave *slave)
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
- /* key is determined according to the link speed, duplex and user key(which
- * is yet not supported)
+ /* key is determined according to the link speed, duplex and
+ * user key
*/
- port->actor_admin_port_key = 0;
+ port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
port->actor_admin_port_key |= __get_duplex(port);
port->actor_admin_port_key |= (__get_link_speed(port) << 1);
port->actor_oper_port_key = port->actor_admin_port_key;
@@ -1959,6 +1965,8 @@ void bond_3ad_bind_slave(struct slave *slave)
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
/* actor system is the bond's system */
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
+ port->actor_system_priority =
+ BOND_AD_INFO(bond).system.sys_priority;
/* tx timer(to verify that no more than MAX_TX_IN_SECOND
* lacpdu's are sent in one second)
*/
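The actor-system fallback added to bond_3ad_initialize() above reduces to one selection rule; a sketch (not driver code), where an all-zero ad_actor_system means "unset":

static const u8 *ad_actor_system_sketch(const struct bonding *bond)
{
	if (is_zero_ether_addr(bond->params.ad_actor_system))
		return bond->dev->dev_addr;	/* default: the bond's own MAC */
	return bond->params.ad_actor_system;	/* administrator override */
}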
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d5fe5d5f490f..19eb990d398c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,7 +76,7 @@
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
@@ -1015,10 +1015,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
- /* If any slave has the offload feature flag set,
- * set the offload flag on the bond.
- */
- mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+ mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
@@ -3054,16 +3051,15 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
int noff, proto = -1;
if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
- return skb_flow_dissect(skb, fk);
+ return skb_flow_dissect_flow_keys(skb, fk);
- fk->ports = 0;
+ fk->ports.ports = 0;
noff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
return false;
iph = ip_hdr(skb);
- fk->src = iph->saddr;
- fk->dst = iph->daddr;
+ iph_to_flow_copy_v4addrs(fk, iph);
noff += iph->ihl << 2;
if (!ip_is_fragment(iph))
proto = iph->protocol;
@@ -3071,15 +3067,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
return false;
iph6 = ipv6_hdr(skb);
- fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
- fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+ iph_to_flow_copy_v6addrs(fk, iph6);
noff += sizeof(*iph6);
proto = iph6->nexthdr;
} else {
return false;
}
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
- fk->ports = skb_flow_get_ports(skb, noff, proto);
+ fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
return true;
}
@@ -3105,8 +3100,9 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
hash = bond_eth_hash(skb);
else
- hash = (__force u32)flow.ports;
- hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+ hash = (__force u32)flow.ports.ports;
+ hash ^= (__force u32)flow_get_u32_dst(&flow) ^
+ (__force u32)flow_get_u32_src(&flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
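With the flow-dissector conversion above, the layer-3/4 transmit hash reduces to the following fold (a sketch of the computation, not a kernel function); callers typically reduce the result modulo the slave count to pick an egress slave:

static u32 bond_l34_hash_sketch(u32 ports, u32 saddr, u32 daddr)
{
	u32 hash = ports ^ saddr ^ daddr;

	/* fold the upper bytes down so all 32 bits influence
	 * the low bits used for slave selection
	 */
	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash;
}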
@@ -4039,8 +4035,12 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
- .ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
- .ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_features_check = passthru_features_check,
};
@@ -4140,6 +4140,8 @@ static int bond_check_params(struct bond_params *params)
struct bond_opt_value newval;
const struct bond_opt_value *valptr;
int arp_all_targets_value;
+ u16 ad_actor_sys_prio = 0;
+ u16 ad_user_port_key = 0;
/* Convert string parameters. */
if (mode) {
@@ -4434,6 +4436,24 @@ static int bond_check_params(struct bond_params *params)
fail_over_mac_value = BOND_FOM_NONE;
}
+ bond_opt_initstr(&newval, "default");
+ valptr = bond_opt_parse(
+ bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
+ &newval);
+ if (!valptr) {
+ pr_err("Error: No ad_actor_sys_prio default value");
+ return -EINVAL;
+ }
+ ad_actor_sys_prio = valptr->value;
+
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
+ &newval);
+ if (!valptr) {
+ pr_err("Error: No ad_user_port_key default value");
+ return -EINVAL;
+ }
+ ad_user_port_key = valptr->value;
+
if (lp_interval == 0) {
pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
@@ -4462,6 +4482,9 @@ static int bond_check_params(struct bond_params *params)
params->lp_interval = lp_interval;
params->packets_per_slave = packets_per_slave;
params->tlb_dynamic_lb = 1; /* Default value */
+ params->ad_actor_sys_prio = ad_actor_sys_prio;
+ eth_zero_addr(params->ad_actor_system);
+ params->ad_user_port_key = ad_user_port_key;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
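The "default" lookups in bond_check_params() above rely on each option's value table carrying one entry flagged as the default. A sketch of that resolution, under the assumption that bond_opt_parse() matches the literal string "default" against the flagged entry (all _sketch names are hypothetical):

struct opt_value_sketch {
	const char *string;
	u64 value;
	u32 flags;
};

#define VALFLAG_DEFAULT_SKETCH (1U << 0)	/* stand-in for BOND_VALFLAG_DEFAULT */

static const struct opt_value_sketch *
opt_find_default_sketch(const struct opt_value_sketch *tbl)
{
	for (; tbl->string; tbl++)	/* tables end with a NULL sentinel */
		if (tbl->flags & VALFLAG_DEFAULT_SKETCH)
			return tbl;
	return NULL;			/* the pr_err()/-EINVAL path above */
}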
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 7b1124366011..f7015eb4f8db 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -94,6 +94,10 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
[IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
[IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
+ [IFLA_BOND_AD_ACTOR_SYS_PRIO] = { .type = NLA_U16 },
+ [IFLA_BOND_AD_USER_PORT_KEY] = { .type = NLA_U16 },
+ [IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
+ .len = ETH_ALEN },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -379,6 +383,36 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
+ if (data[IFLA_BOND_AD_ACTOR_SYS_PRIO]) {
+ int actor_sys_prio =
+ nla_get_u16(data[IFLA_BOND_AD_ACTOR_SYS_PRIO]);
+
+ bond_opt_initval(&newval, actor_sys_prio);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
+ int port_key =
+ nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
+
+ bond_opt_initval(&newval, port_key);
+ err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
+ if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
+ return -EINVAL;
+
+ bond_opt_initval(&newval,
+ nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -426,6 +460,9 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
+ nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
0;
}
@@ -551,6 +588,19 @@ static int bond_fill_info(struct sk_buff *skb,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
+ if (nla_put_u16(skb, IFLA_BOND_AD_ACTOR_SYS_PRIO,
+ bond->params.ad_actor_sys_prio))
+ goto nla_put_failure;
+
+ if (nla_put_u16(skb, IFLA_BOND_AD_USER_PORT_KEY,
+ bond->params.ad_user_port_key))
+ goto nla_put_failure;
+
+ if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
+ sizeof(bond->params.ad_actor_system),
+ &bond->params.ad_actor_system))
+ goto nla_put_failure;
+
if (!bond_3ad_get_active_agg_info(bond, &info)) {
struct nlattr *nest;
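One subtlety in the bond_changelink() hunk above: for an NLA_BINARY policy the .len field is only an upper bound, so a shorter attribute still passes policy validation; the explicit length check is what rejects it. A condensed sketch (helper name hypothetical; nla_len()/nla_data() are the real accessors):

static int bond_actor_system_from_attr_sketch(const struct nlattr *attr, u8 *mac)
{
	/* the policy guarantees nla_len(attr) <= ETH_ALEN, not equality */
	if (nla_len(attr) != ETH_ALEN)
		return -EINVAL;

	memcpy(mac, nla_data(attr), ETH_ALEN);
	return 0;
}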
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index e8d3c1d35453..e9c624d54dd4 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,12 @@ static int bond_option_slaves_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_ad_actor_system_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_ad_user_port_key_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
@@ -186,6 +192,18 @@ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
{ NULL, -1, 0}
};
+static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
+ { "minval", 1, BOND_VALFLAG_MIN},
+ { "maxval", 65535, BOND_VALFLAG_MAX | BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0},
+};
+
+static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
+ { "minval", 0, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
+ { "maxval", 1023, BOND_VALFLAG_MAX},
+ { NULL, -1, 0},
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -379,6 +397,29 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_tlb_dynamic_lb_tbl,
.flags = BOND_OPTFLAG_IFDOWN,
.set = bond_option_tlb_dynamic_lb_set,
+ },
+ [BOND_OPT_AD_ACTOR_SYS_PRIO] = {
+ .id = BOND_OPT_AD_ACTOR_SYS_PRIO,
+ .name = "ad_actor_sys_prio",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_ad_actor_sys_prio_tbl,
+ .set = bond_option_ad_actor_sys_prio_set,
+ },
+ [BOND_OPT_AD_ACTOR_SYSTEM] = {
+ .id = BOND_OPT_AD_ACTOR_SYSTEM,
+ .name = "ad_actor_system",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_RAWVAL | BOND_OPTFLAG_IFDOWN,
+ .set = bond_option_ad_actor_system_set,
+ },
+ [BOND_OPT_AD_USER_PORT_KEY] = {
+ .id = BOND_OPT_AD_USER_PORT_KEY,
+ .name = "ad_user_port_key",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_ad_user_port_key_tbl,
+ .set = bond_option_ad_user_port_key_set,
}
};
@@ -1349,3 +1390,53 @@ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
return 0;
}
+
+static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_info(bond->dev, "Setting ad_actor_sys_prio to %llu\n",
+ newval->value);
+
+ bond->params.ad_actor_sys_prio = newval->value;
+ return 0;
+}
+
+static int bond_option_ad_actor_system_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ u8 macaddr[ETH_ALEN];
+ u8 *mac;
+ int i;
+
+ if (newval->string) {
+ i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &macaddr[0], &macaddr[1], &macaddr[2],
+ &macaddr[3], &macaddr[4], &macaddr[5]);
+ if (i != ETH_ALEN)
+ goto err;
+ mac = macaddr;
+ } else {
+ mac = (u8 *)&newval->value;
+ }
+
+ if (!is_valid_ether_addr(mac))
+ goto err;
+
+ netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac);
+ ether_addr_copy(bond->params.ad_actor_system, mac);
+ return 0;
+
+err:
+ netdev_err(bond->dev, "Invalid MAC address.\n");
+ return -EINVAL;
+}
+
+static int bond_option_ad_user_port_key_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_info(bond->dev, "Setting ad_user_port_key to %llu\n",
+ newval->value);
+
+ bond->params.ad_user_port_key = newval->value;
+ return 0;
+}
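A sketch of how a caller drives these new setters through the option table; bond_opt_initstr(), bond_opt_initval() and __bond_opt_set() are the entry points bonding already uses elsewhere, while the wrapper and the MAC value here are examples only:

static int bond_set_ad_params_sketch(struct bonding *bond)
{
	struct bond_opt_value newval;
	int err;

	/* the string form exercises the sscanf() parser above */
	bond_opt_initstr(&newval, "02:aa:bb:cc:dd:ee");
	err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
	if (err)
		return err;

	bond_opt_initval(&newval, 1023);	/* maximum user port key */
	return __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval);
}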
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index b20b35acb47d..e7f3047a26df 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -135,6 +135,10 @@ static void bond_info_show_master(struct seq_file *seq)
bond->params.ad_select);
seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
optval->string);
+ seq_printf(seq, "System priority: %d\n",
+ BOND_AD_INFO(bond).system.sys_priority);
+ seq_printf(seq, "System MAC address: %pM\n",
+ &BOND_AD_INFO(bond).system.sys_mac_addr);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
seq_printf(seq, "bond %s has no active aggregator\n",
@@ -198,6 +202,8 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_puts(seq, "details actor lacp pdu:\n");
seq_printf(seq, " system priority: %d\n",
port->actor_system_priority);
+ seq_printf(seq, " system mac address: %pM\n",
+ &port->actor_system);
seq_printf(seq, " port key: %d\n",
port->actor_oper_port_key);
seq_printf(seq, " port priority: %d\n",
@@ -210,6 +216,8 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_puts(seq, "details partner lacp pdu:\n");
seq_printf(seq, " system priority: %d\n",
port->partner_oper.system_priority);
+ seq_printf(seq, " system mac address: %pM\n",
+ &port->partner_oper.system);
seq_printf(seq, " oper key: %d\n",
port->partner_oper.key);
seq_printf(seq, " port priority: %d\n",
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 7e9e151d4d61..143a2abd1c1c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -692,6 +692,49 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
+static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+
+ return 0;
+}
+static DEVICE_ATTR(ad_actor_sys_prio, S_IRUGO | S_IWUSR,
+ bonding_show_ad_actor_sys_prio, bonding_sysfs_store_option);
+
+static ssize_t bonding_show_ad_actor_system(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+
+ return 0;
+}
+
+static DEVICE_ATTR(ad_actor_system, S_IRUGO | S_IWUSR,
+ bonding_show_ad_actor_system, bonding_sysfs_store_option);
+
+static ssize_t bonding_show_ad_user_port_key(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+
+ return 0;
+}
+static DEVICE_ATTR(ad_user_port_key, S_IRUGO | S_IWUSR,
+ bonding_show_ad_user_port_key, bonding_sysfs_store_option);
+
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
&dev_attr_mode.attr,
@@ -725,6 +768,9 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_lp_interval.attr,
&dev_attr_packets_per_slave.attr,
&dev_attr_tlb_dynamic_lb.attr,
+ &dev_attr_ad_actor_sys_prio.attr,
+ &dev_attr_ad_actor_system.attr,
+ &dev_attr_ad_user_port_key.attr,
NULL,
};
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ad0a7e8c2c2b..6201c5a1a884 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -93,13 +93,13 @@
(FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
/* FLEXCAN control register 2 (CTRL2) bits */
-#define FLEXCAN_CRL2_ECRWRE BIT(29)
-#define FLEXCAN_CRL2_WRMFRZ BIT(28)
-#define FLEXCAN_CRL2_RFFN(x) (((x) & 0x0f) << 24)
-#define FLEXCAN_CRL2_TASD(x) (((x) & 0x1f) << 19)
-#define FLEXCAN_CRL2_MRP BIT(18)
-#define FLEXCAN_CRL2_RRS BIT(17)
-#define FLEXCAN_CRL2_EACEN BIT(16)
+#define FLEXCAN_CTRL2_ECRWRE BIT(29)
+#define FLEXCAN_CTRL2_WRMFRZ BIT(28)
+#define FLEXCAN_CTRL2_RFFN(x) (((x) & 0x0f) << 24)
+#define FLEXCAN_CTRL2_TASD(x) (((x) & 0x1f) << 19)
+#define FLEXCAN_CTRL2_MRP BIT(18)
+#define FLEXCAN_CTRL2_RRS BIT(17)
+#define FLEXCAN_CTRL2_EACEN BIT(16)
/* FLEXCAN memory error control register (MECR) bits */
#define FLEXCAN_MECR_ECRWRDIS BIT(31)
@@ -158,7 +158,6 @@
FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))
/* FLEXCAN message buffers */
-#define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24)
#define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24)
#define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24)
#define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24)
@@ -184,14 +183,14 @@
* FLEXCAN hardware feature flags
*
* Below is some version info we got:
- * SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err
- *                             Filter? connected?  detection
- * MX25  FlexCAN2  03.00.00.00    no       no          no
- * MX28  FlexCAN2  03.00.04.00   yes      yes          no
- * MX35  FlexCAN2  03.00.00.00    no       no          no
- * MX53  FlexCAN2  03.00.00.00   yes       no          no
- * MX6s  FlexCAN3  10.00.12.00   yes      yes          no
- * VF610 FlexCAN3  ?              no      yes         yes
+ * SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err RTR re-
+ *                             Filter? connected?  detection  ception in MB
+ * MX25  FlexCAN2  03.00.00.00    no       no          no         no
+ * MX28  FlexCAN2  03.00.04.00   yes      yes          no         no
+ * MX35  FlexCAN2  03.00.00.00    no       no          no         no
+ * MX53  FlexCAN2  03.00.00.00   yes       no          no         no
+ * MX6s  FlexCAN3  10.00.12.00   yes      yes          no        yes
+ * VF610 FlexCAN3  ?              no      yes         yes        yes?
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@@ -221,7 +220,7 @@ struct flexcan_regs {
u32 imask1; /* 0x28 */
u32 iflag2; /* 0x2c */
u32 iflag1; /* 0x30 */
- u32 crl2; /* 0x34 */
+ u32 ctrl2; /* 0x34 */
u32 esr2; /* 0x38 */
u32 imeur; /* 0x3c */
u32 lrfr; /* 0x40 */
@@ -230,6 +229,16 @@ struct flexcan_regs {
u32 rxfir; /* 0x4c */
u32 _reserved3[12]; /* 0x50 */
struct flexcan_mb cantxfg[64]; /* 0x80 */
+ /* FIFO-mode:
+ * MB
+ * 0x080...0x08f 0 RX message buffer
+ * 0x090...0x0df 1-5 reserved
+ * 0x0e0...0x0ff 6-7 8 entry ID table
+ * (mx25, mx28, mx35, mx53)
+ * 0x0e0...0x2df 6-7..37 8..128 entry ID table
+ * size conf'ed via ctrl2::RFFN
+ * (mx6, vf610)
+ */
u32 _reserved4[408];
u32 mecr; /* 0xae0 */
u32 erriar; /* 0xae4 */
@@ -468,7 +477,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 can_id;
- u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16);
+ u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
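The transmit control word built above packs two message-buffer header fields; a layout sketch (field positions taken from the FLEXCAN_MB_* defines earlier in this diff, helper name hypothetical):

static u32 flexcan_tx_mb_ctrl_sketch(u8 can_dlc)
{
	/* CODE = 0xc (transmit data frame) in bits 27:24, the data
	 * length code in bits 19:16 -- the same word as
	 * FLEXCAN_MB_CODE_TX_DATA | (can_dlc << 16)
	 */
	return (0xc << 24) | ((u32)can_dlc << 16);
}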
@@ -815,7 +824,7 @@ static int flexcan_chip_start(struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->base;
- u32 reg_mcr, reg_ctrl, reg_crl2, reg_mecr;
+ u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
int err, i;
/* enable module */
@@ -918,9 +927,9 @@ static int flexcan_chip_start(struct net_device *dev)
* and Correction of Memory Errors" to write to
* MECR register
*/
- reg_crl2 = flexcan_read(&regs->crl2);
- reg_crl2 |= FLEXCAN_CRL2_ECRWRE;
- flexcan_write(reg_crl2, &regs->crl2);
+ reg_ctrl2 = flexcan_read(&regs->ctrl2);
+ reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
+ flexcan_write(reg_ctrl2, &regs->ctrl2);
reg_mecr = flexcan_read(&regs->mecr);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
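The "8..128 entry ID table" in the FIFO-mode layout comment above is sized by the CTRL2 RFFN field; the relation, per the FlexCAN reference manuals (illustrative helper, not driver code):

static unsigned int flexcan_rffn_filters_sketch(unsigned int rffn)
{
	/* RFFN = 0..15, programmed into bits 27:24 of CTRL2 by
	 * FLEXCAN_CTRL2_RFFN(x), selects 8 * (RFFN + 1) RX FIFO
	 * filter entries
	 */
	return 8 * (rffn + 1);
}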
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 4dd183a3643a..c1e85368a198 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -40,6 +40,7 @@
#define MSYNC_PEER 0x00 /* ICAN only */
#define MSYNC_LOCL 0x01 /* host only */
#define TARGET_RUNNING 0x02
+#define FIRMWARE_STAMP 0x60 /* big endian firmware stamp */
#define MSYNC_RB0 0x01
#define MSYNC_RB1 0x02
@@ -83,6 +84,7 @@
#define MSG_COFFREQ 0x42
#define MSG_CONREQ 0x43
#define MSG_CCONFREQ 0x47
+#define MSG_LMTS 0xb4
/*
* Janz ICAN3 CAN Inquiry Message Types
@@ -165,6 +167,12 @@
/* SJA1000 Clock Input */
#define ICAN3_CAN_CLOCK 8000000
+/* Janz ICAN3 firmware types */
+enum ican3_fwtype {
+ ICAN3_FWTYPE_ICANOS,
+ ICAN3_FWTYPE_CAL_CANOPEN,
+};
+
/* Driver Name */
#define DRV_NAME "janz-ican3"
@@ -215,6 +223,10 @@ struct ican3_dev {
struct completion buserror_comp;
struct can_berr_counter bec;
+ /* firmware type */
+ enum ican3_fwtype fwtype;
+ char fwinfo[32];
+
/* old and new style host interface */
unsigned int iftype;
@@ -750,13 +762,61 @@ static int ican3_set_id_filter(struct ican3_dev *mod, bool accept)
*/
static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
{
+ struct can_bittiming *bt = &mod->can.bittiming;
struct ican3_msg msg;
+ u8 btr0, btr1;
+ int res;
- memset(&msg, 0, sizeof(msg));
- msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
- msg.len = cpu_to_le16(0);
+ /* This algorithm was stolen from drivers/net/can/sja1000/sja1000.c.
+ * The bittiming register command for the ICAN3 just sets the bit
+ * timing registers on the SJA1000 chip directly.
+ */
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
- return ican3_send_msg(mod, &msg);
+ if (mod->fwtype == ICAN3_FWTYPE_ICANOS) {
+ if (on) {
+ /* set bittiming */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CBTRREQ;
+ msg.len = cpu_to_le16(4);
+ msg.data[0] = 0x00;
+ msg.data[1] = 0x00;
+ msg.data[2] = btr0;
+ msg.data[3] = btr1;
+
+ res = ican3_send_msg(mod, &msg);
+ if (res)
+ return res;
+ }
+
+ /* can-on/off request */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+
+ } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_LMTS;
+ if (on) {
+ msg.len = cpu_to_le16(4);
+ msg.data[0] = 0;
+ msg.data[1] = 0;
+ msg.data[2] = btr0;
+ msg.data[3] = btr1;
+ } else {
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 1;
+ msg.data[1] = 0;
+ }
+
+ return ican3_send_msg(mod, &msg);
+ }
+ return -ENOTSUPP;
}
static int ican3_set_termination(struct ican3_dev *mod, bool on)
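A worked example of the BTR packing now done in ican3_set_bus_state() above; the timing values are an assumed 500 kbit/s profile for the module's 8 MHz SJA1000 clock, not taken from this patch:

static void ican3_btr_sketch(void)
{
	/* brp = 1, sjw = 1, prop_seg = 6, phase_seg1 = 7, phase_seg2 = 2:
	 * 16 time quanta per bit at 8 MHz -> 500 kbit/s
	 */
	u8 btr0 = ((1 - 1) & 0x3f) | (((1 - 1) & 0x3) << 6);	/* 0x00 */
	u8 btr1 = ((6 + 7 - 1) & 0xf) | (((2 - 1) & 0x7) << 4);	/* 0x1c */

	(void)btr0;	/* the classic SJA1000 pair for 500 kbit/s */
	(void)btr1;
}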
@@ -1402,7 +1462,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
return 0;
msleep(10);
- } while (time_before(jiffies, start + HZ / 4));
+ } while (time_before(jiffies, start + HZ / 2));
netdev_err(mod->ndev, "failed to reset CAN module\n");
return -ETIMEDOUT;
@@ -1427,6 +1487,17 @@ static int ican3_startup_module(struct ican3_dev *mod)
return ret;
}
+ /* detect firmware */
+ memcpy_fromio(mod->fwinfo, mod->dpm + FIRMWARE_STAMP, sizeof(mod->fwinfo) - 1);
+ if (strncmp(mod->fwinfo, "JANZ-ICAN3", 10)) {
+ netdev_err(mod->ndev, "ICAN3 not detected (found %s)\n", mod->fwinfo);
+ return -ENODEV;
+ }
+ if (strstr(mod->fwinfo, "CAL/CANopen"))
+ mod->fwtype = ICAN3_FWTYPE_CAL_CANOPEN;
+ else
+ mod->fwtype = ICAN3_FWTYPE_ICANOS;
+
/* re-enable interrupts so we can send messages */
iowrite8(1 << mod->num, &mod->ctrl->int_enable);
@@ -1615,36 +1686,6 @@ static const struct can_bittiming_const ican3_bittiming_const = {
.brp_inc = 1,
};
-/*
- * This routine was stolen from drivers/net/can/sja1000/sja1000.c
- *
- * The bittiming register command for the ICAN3 just sets the bit timing
- * registers on the SJA1000 chip directly
- */
-static int ican3_set_bittiming(struct net_device *ndev)
-{
- struct ican3_dev *mod = netdev_priv(ndev);
- struct can_bittiming *bt = &mod->can.bittiming;
- struct ican3_msg msg;
- u8 btr0, btr1;
-
- btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
- btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
- (((bt->phase_seg2 - 1) & 0x7) << 4);
- if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- btr1 |= 0x80;
-
- memset(&msg, 0, sizeof(msg));
- msg.spec = MSG_CBTRREQ;
- msg.len = cpu_to_le16(4);
- msg.data[0] = 0x00;
- msg.data[1] = 0x00;
- msg.data[2] = btr0;
- msg.data[3] = btr1;
-
- return ican3_send_msg(mod, &msg);
-}
-
static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
{
struct ican3_dev *mod = netdev_priv(ndev);
@@ -1730,11 +1771,22 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
return count;
}
+static ssize_t ican3_sysfs_show_fwinfo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
+}
+
static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
ican3_sysfs_set_term);
+static DEVICE_ATTR(fwinfo, S_IRUSR | S_IRUGO, ican3_sysfs_show_fwinfo, NULL);
static struct attribute *ican3_sysfs_attrs[] = {
&dev_attr_termination.attr,
+ &dev_attr_fwinfo.attr,
NULL,
};
@@ -1794,7 +1846,6 @@ static int ican3_probe(struct platform_device *pdev)
mod->can.clock.freq = ICAN3_CAN_CLOCK;
mod->can.bittiming_const = &ican3_bittiming_const;
- mod->can.do_set_bittiming = ican3_set_bittiming;
mod->can.do_set_mode = ican3_set_mode;
mod->can.do_get_berr_counter = ican3_get_berr_counter;
mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
@@ -1866,7 +1917,7 @@ static int ican3_probe(struct platform_device *pdev)
goto out_free_irq;
}
- dev_info(dev, "module %d: registered CAN device\n", pdata->modno);
+ netdev_info(mod->ndev, "module %d: registered CAN device\n", pdata->modno);
return 0;
out_free_irq:
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 18550c7ebe6f..7ad0a4d8e475 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,22 +37,22 @@ config NET_DSA_MV88E6123_61_65
ethernet switch chips.
config NET_DSA_MV88E6171
- tristate "Marvell 88E6171/6172 ethernet switch chip support"
+ tristate "Marvell 88E6171/6175/6350/6351 ethernet switch chip support"
depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6171/6172 ethernet switch
- chips.
+ This enables support for the Marvell 88E6171/6175/6350/6351
+ ethernet switch chips.
config NET_DSA_MV88E6352
- tristate "Marvell 88E6176/88E6352 ethernet switch chip support"
+ tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6176 and 88E6352 ethernet
- switch chips.
+ This enables support for the Marvell 88E6172, 88E6176 and 88E6352
+ ethernet switch chips.
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index cedb572bf25a..103fde3da476 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -911,6 +911,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
*/
if (port == 7) {
status->link = priv->port_sts[port].link;
+ /* For MoCA interfaces, also force a link down notification
+ * since some versions of the user-space daemon (mocad) use
+ * cmd->autoneg to force the link, which messes up the PHY
+ * state machine and makes it go into PHY_FORCING state instead.
+ */
+ if (!status->link)
+ netif_carrier_off(ds->ports[port]);
status->duplex = 1;
} else {
status->link = 1;
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index b4af6d5aff7c..71a29a7ce538 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -54,192 +54,40 @@ static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr)
static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
{
+ u32 upstream_port = dsa_upstream_port(ds);
int ret;
- int i;
+ u32 reg;
+
+ ret = mv88e6xxx_setup_global(ds);
+ if (ret)
+ return ret;
/* Disable the PHY polling unit (since there won't be any
* external PHYs to poll), don't discard packets with
* excessive collisions, and mask all interrupt sources.
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
-
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
- /* Configure the priority mapping registers. */
- ret = mv88e6xxx_config_prio(ds);
- if (ret < 0)
- return ret;
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000);
/* Configure the upstream port, and configure the upstream
* port as the port to which ingress and egress monitor frames
* are to be sent.
*/
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+ REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
/* Disable remote management for now, and set the switch's
* DSA device number.
*/
- REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:2x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:0x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
- /* Disable the loopback filter, disable flow control
- * messages, disable flood broadcast override, disable
- * removing of provider tags, disable ATU age violation
- * interrupts, disable tag flow control, force flow
- * control priority to the highest, and send all special
- * multicast frames to the CPU at the highest priority.
- */
- REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
- /* Program the DSA routing table. */
- for (i = 0; i < 32; i++) {
- int nexthop;
-
- nexthop = 0x1f;
- if (i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
-
- REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
- }
-
- /* Clear all trunk masks. */
- for (i = 0; i < 8; i++)
- REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
-
- /* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
- /* Disable ingress rate limiting by resetting all ingress
- * rate limit registers to their initial state.
- */
- for (i = 0; i < 6; i++)
- REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
- /* Initialise cross-chip port VLAN table to reset defaults. */
- REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
- /* Clear the priority override table. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
- /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
return 0;
}
-static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
-{
- int addr = REG_PORT(p);
- u16 val;
-
- /* MAC Forcing register: don't force link, speed, duplex
- * or flow control state to any particular values on physical
- * ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
- */
- if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, 0x003e);
- else
- REG_WRITE(addr, 0x01, 0x0003);
-
- /* Do not limit the period of time that this port can be
- * paused for by the remote end or the period of time that
- * this port can pause the remote end.
- */
- REG_WRITE(addr, 0x02, 0x0000);
-
- /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
- * disable Header mode, enable IGMP/MLD snooping, disable VLAN
- * tunneling, determine priority by looking at 802.1p and IP
- * priority fields (IP prio has precedence), and set STP state
- * to Forwarding.
- *
- * If this is the CPU link, use DSA or EDSA tagging depending
- * on which tagging mode was configured.
- *
- * If this is a link to another switch, use DSA tagging mode.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown unicasts and multicasts.
- */
- val = 0x0433;
- if (dsa_is_cpu_port(ds, p)) {
- if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
- val |= 0x3300;
- else
- val |= 0x0100;
- }
- if (ds->dsa_port_mask & (1 << p))
- val |= 0x0100;
- if (p == dsa_upstream_port(ds))
- val |= 0x000c;
- REG_WRITE(addr, 0x04, val);
-
- /* Port Control 2: don't force a good FCS, set the maximum
- * frame size to 10240 bytes, don't let the switch add or
- * strip 802.1q tags, don't discard tagged or untagged frames
- * on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't
- * send a copy of all transmitted/received frames on this port
- * to the CPU.
- */
- REG_WRITE(addr, 0x08, 0x2080);
-
- /* Egress rate control: disable egress rate control. */
- REG_WRITE(addr, 0x09, 0x0001);
-
- /* Egress rate control 2: disable egress rate control. */
- REG_WRITE(addr, 0x0a, 0x0000);
-
- /* Port Association Vector: when learning source addresses
- * of packets, add the address to the address database using
- * a port bitmap that has only the bit for this port set and
- * the other bits clear.
- */
- REG_WRITE(addr, 0x0b, 1 << p);
-
- /* Port ATU control: disable limiting the number of address
- * database entries that this port is allowed to use.
- */
- REG_WRITE(addr, 0x0c, 0x0000);
-
- /* Priority Override: disable DA, SA and VTU priority override. */
- REG_WRITE(addr, 0x0d, 0x0000);
-
- /* Port Ethertype: use the Ethertype DSA Ethertype value. */
- REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
- /* Tag Remap: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x18, 0x3210);
-
- /* Tag Remap 2: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x19, 0x7654);
-
- return mv88e6xxx_setup_port_common(ds, p);
-}
-
static int mv88e6123_61_65_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int i;
int ret;
ret = mv88e6xxx_setup_common(ds);
@@ -262,19 +110,11 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- /* @@@ initialise vtu and atu */
-
ret = mv88e6123_61_65_setup_global(ds);
if (ret < 0)
return ret;
- for (i = 0; i < ps->num_ports; i++) {
- ret = mv88e6123_61_65_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return mv88e6xxx_setup_ports(ds);
}
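The GLOBAL_MONITOR_CONTROL rewrite above is a re-expression of the old multiply trick rather than a behaviour change; a quick sketch of the equivalence, with the shift values assumed from the mv88e6xxx.h additions (ingress 12, egress 8, ARP 4):

	/* e.g. upstream_port = 5:
	 *   old: 5 * 0x1110                 = 0x5550
	 *   new: 5 << 12 | 5 << 8 | 5 << 4  = 0x5000 | 0x0500 | 0x0050 = 0x5550
	 * The 88E6171 variant multiplied by 0x1111 to also fill the
	 * mirror-destination nibble, hence the extra MIRROR_SHIFT term there.
	 */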
struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index e54824fa0d95..32f4a08e9bc9 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -37,6 +37,8 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
return "Marvell 88E6131 (B2)";
if (ret_masked == PORT_SWITCH_ID_6131)
return "Marvell 88E6131";
+ if (ret_masked == PORT_SWITCH_ID_6185)
+ return "Marvell 88E6185";
}
return NULL;
@@ -44,186 +46,62 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
static int mv88e6131_setup_global(struct dsa_switch *ds)
{
+ u32 upstream_port = dsa_upstream_port(ds);
int ret;
- int i;
+ u32 reg;
+
+ ret = mv88e6xxx_setup_global(ds);
+ if (ret)
+ return ret;
/* Enable the PHY polling unit, don't discard packets with
* excessive collisions, use a weighted fair queueing scheme
* to arbitrate between packet queues, set the maximum frame
* size to 1632, and mask all interrupt sources.
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x4400);
-
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
- /* Configure the priority mapping registers. */
- ret = mv88e6xxx_config_prio(ds);
- if (ret < 0)
- return ret;
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+ GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632);
/* Set the VLAN ethertype to 0x8100. */
- REG_WRITE(REG_GLOBAL, 0x19, 0x8100);
+ REG_WRITE(REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100);
/* Disable ARP mirroring, and configure the upstream port as
* the port to which ingress and egress monitor frames are to
* be sent.
*/
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ GLOBAL_MONITOR_CONTROL_ARP_DISABLED;
+ REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
/* Disable cascade port functionality unless this device
* is used in a cascade configuration, and set the switch's
* DSA device number.
*/
if (ds->dst->pd->nr_chips > 1)
- REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+ (ds->index & 0x1f));
else
- REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:0x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
- /* Ignore removed tag data on doubly tagged packets, disable
- * flow control messages, force flow control priority to the
- * highest, and send all special multicast frames to the CPU
- * port at the highest priority.
- */
- REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
- /* Program the DSA routing table. */
- for (i = 0; i < 32; i++) {
- int nexthop;
-
- nexthop = 0x1f;
- if (ds->pd->rtable &&
- i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
-
- REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
- }
-
- /* Clear all trunk masks. */
- for (i = 0; i < 8; i++)
- REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);
-
- /* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_NO_CASCADE |
+ (ds->index & 0x1f));
/* Force the priority of IGMP/MLD snoop frames and ARP frames
* to the highest setting.
*/
- REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
+ GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP |
+ 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT |
+ GLOBAL2_PRIO_OVERRIDE_FORCE_ARP |
+ 7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT);
return 0;
}
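The symbolic priority-override write above encodes the same value the old code wrote as a magic number. Assuming the bit layout implied by the new defines (force-snoop at bit 7, snoop priority at bit 4, force-ARP at bit 3, ARP priority at bit 0), the arithmetic works out as:

	/* GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP | 7 << 4 |
	 * GLOBAL2_PRIO_OVERRIDE_FORCE_ARP   | 7 << 0
	 *   = 0x80 | 0x70 | 0x08 | 0x07 = 0x00ff   (the old magic value)
	 */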
-static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = REG_PORT(p);
- u16 val;
-
- /* MAC Forcing register: don't force link, speed, duplex
- * or flow control state to any particular values on physical
- * ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * (100 Mb/s on 6085) full duplex.
- */
- if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- if (ps->id == PORT_SWITCH_ID_6085)
- REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
- else
- REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
- else
- REG_WRITE(addr, 0x01, 0x0003);
-
- /* Port Control: disable Core Tag, disable Drop-on-Lock,
- * transmit frames unmodified, disable Header mode,
- * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
- * tunneling, determine priority by looking at 802.1p and
- * IP priority fields (IP prio has precedence), and set STP
- * state to Forwarding.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown unicasts, and enable DSA tagging
- * mode.
- *
- * If this is the link to another switch, use DSA tagging
- * mode, but do not enable forwarding of unknown unicasts.
- */
- val = 0x0433;
- if (p == dsa_upstream_port(ds)) {
- val |= 0x0104;
- /* On 6085, unknown multicast forward is controlled
- * here rather than in Port Control 2 register.
- */
- if (ps->id == PORT_SWITCH_ID_6085)
- val |= 0x0008;
- }
- if (ds->dsa_port_mask & (1 << p))
- val |= 0x0100;
- REG_WRITE(addr, 0x04, val);
-
- /* Port Control 2: don't force a good FCS, don't use
- * VLAN-based, source address-based or destination
- * address-based priority overrides, don't let the switch
- * add or strip 802.1q tags, don't discard tagged or
- * untagged frames on this port, do a destination address
- * lookup on received packets as usual, don't send a copy
- * of all transmitted/received frames on this port to the
- * CPU, and configure the upstream port number.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown multicast addresses.
- */
- if (ps->id == PORT_SWITCH_ID_6085)
- /* on 6085, bits 3:0 are reserved, bit 6 control ARP
- * mirroring, and multicast forward is handled in
- * Port Control register.
- */
- REG_WRITE(addr, 0x08, 0x0080);
- else {
- val = 0x0080 | dsa_upstream_port(ds);
- if (p == dsa_upstream_port(ds))
- val |= 0x0040;
- REG_WRITE(addr, 0x08, val);
- }
-
- /* Rate Control: disable ingress rate limiting. */
- REG_WRITE(addr, 0x09, 0x0000);
-
- /* Rate Control 2: disable egress rate limiting. */
- REG_WRITE(addr, 0x0a, 0x0000);
-
- /* Port Association Vector: when learning source addresses
- * of packets, add the address to the address database using
- * a port bitmap that has only the bit for this port set and
- * the other bits clear.
- */
- REG_WRITE(addr, 0x0b, 1 << p);
-
- /* Tag Remap: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x18, 0x3210);
-
- /* Tag Remap 2: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x19, 0x7654);
-
- return mv88e6xxx_setup_port_common(ds, p);
-}
-
static int mv88e6131_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int i;
int ret;
ret = mv88e6xxx_setup_common(ds);
@@ -234,6 +112,7 @@ static int mv88e6131_setup(struct dsa_switch *ds)
switch (ps->id) {
case PORT_SWITCH_ID_6085:
+ case PORT_SWITCH_ID_6185:
ps->num_ports = 10;
break;
case PORT_SWITCH_ID_6095:
@@ -251,19 +130,11 @@ static int mv88e6131_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- /* @@@ initialise vtu and atu */
-
ret = mv88e6131_setup_global(ds);
if (ret < 0)
return ret;
- for (i = 0; i < ps->num_ports; i++) {
- ret = mv88e6131_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return mv88e6xxx_setup_ports(ds);
}
static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 9104efea0e3e..1c7808495a9d 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -1,4 +1,4 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171/8826172 switch chip support
+/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
*
@@ -29,8 +29,12 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
if (ret >= 0) {
if ((ret & 0xfff0) == PORT_SWITCH_ID_6171)
return "Marvell 88E6171";
- if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
- return "Marvell 88E6172";
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6175)
+ return "Marvell 88E6175";
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6350)
+ return "Marvell 88E6350";
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6351)
+ return "Marvell 88E6351";
}
return NULL;
@@ -38,196 +42,41 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
static int mv88e6171_setup_global(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u32 upstream_port = dsa_upstream_port(ds);
int ret;
- int i;
+ u32 reg;
+
+ ret = mv88e6xxx_setup_global(ds);
+ if (ret)
+ return ret;
/* Discard packets with excessive collisions, mask all
* interrupt sources, enable PPU.
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
-
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
- /* Configure the priority mapping registers. */
- ret = mv88e6xxx_config_prio(ds);
- if (ret < 0)
- return ret;
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+ GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
/* Configure the upstream port, and configure the upstream
* port as the port to which ingress and egress monitor frames
* are to be sent.
*/
- if (REG_READ(REG_PORT(0), 0x03) == 0x1710)
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1111));
- else
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT;
+ REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
/* Disable remote management for now, and set the switch's
* DSA device number.
*/
- REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:2x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:0x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
- /* Disable the loopback filter, disable flow control
- * messages, disable flood broadcast override, disable
- * removing of provider tags, disable ATU age violation
- * interrupts, disable tag flow control, force flow
- * control priority to the highest, and send all special
- * multicast frames to the CPU at the highest priority.
- */
- REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
- /* Program the DSA routing table. */
- for (i = 0; i < 32; i++) {
- int nexthop;
-
- nexthop = 0x1f;
- if (i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
-
- REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
- }
-
- /* Clear all trunk masks. */
- for (i = 0; i < ps->num_ports; i++)
- REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
-
- /* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
- /* Disable ingress rate limiting by resetting all ingress
- * rate limit registers to their initial state.
- */
- for (i = 0; i < 6; i++)
- REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
- /* Initialise cross-chip port VLAN table to reset defaults. */
- REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
- /* Clear the priority override table. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
- /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
return 0;
}
-static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
-{
- int addr = REG_PORT(p);
- u16 val;
-
- /* MAC Forcing register: don't force link, speed, duplex
- * or flow control state to any particular values on physical
- * ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
- */
- val = REG_READ(addr, 0x01);
- if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, val | 0x003e);
- else
- REG_WRITE(addr, 0x01, val | 0x0003);
-
- /* Do not limit the period of time that this port can be
- * paused for by the remote end or the period of time that
- * this port can pause the remote end.
- */
- REG_WRITE(addr, 0x02, 0x0000);
-
- /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
- * disable Header mode, enable IGMP/MLD snooping, disable VLAN
- * tunneling, determine priority by looking at 802.1p and IP
- * priority fields (IP prio has precedence), and set STP state
- * to Forwarding.
- *
- * If this is the CPU link, use DSA or EDSA tagging depending
- * on which tagging mode was configured.
- *
- * If this is a link to another switch, use DSA tagging mode.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown unicasts and multicasts.
- */
- val = 0x0433;
- if (dsa_is_cpu_port(ds, p)) {
- if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
- val |= 0x3300;
- else
- val |= 0x0100;
- }
- if (ds->dsa_port_mask & (1 << p))
- val |= 0x0100;
- if (p == dsa_upstream_port(ds))
- val |= 0x000c;
- REG_WRITE(addr, 0x04, val);
-
- /* Port Control 2: don't force a good FCS, set the maximum
- * frame size to 10240 bytes, don't let the switch add or
- * strip 802.1q tags, don't discard tagged or untagged frames
- * on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't
- * send a copy of all transmitted/received frames on this port
- * to the CPU.
- */
- REG_WRITE(addr, 0x08, 0x2080);
-
- /* Egress rate control: disable egress rate control. */
- REG_WRITE(addr, 0x09, 0x0001);
-
- /* Egress rate control 2: disable egress rate control. */
- REG_WRITE(addr, 0x0a, 0x0000);
-
- /* Port Association Vector: when learning source addresses
- * of packets, add the address to the address database using
- * a port bitmap that has only the bit for this port set and
- * the other bits clear.
- */
- REG_WRITE(addr, 0x0b, 1 << p);
-
- /* Port ATU control: disable limiting the number of address
- * database entries that this port is allowed to use.
- */
- REG_WRITE(addr, 0x0c, 0x0000);
-
- /* Priority Override: disable DA, SA and VTU priority override. */
- REG_WRITE(addr, 0x0d, 0x0000);
-
- /* Port Ethertype: use the Ethertype DSA Ethertype value. */
- REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
- /* Tag Remap: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x18, 0x3210);
-
- /* Tag Remap 2: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x19, 0x7654);
-
- return mv88e6xxx_setup_port_common(ds, p);
-}
-
static int mv88e6171_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int i;
int ret;
ret = mv88e6xxx_setup_common(ds);
@@ -240,44 +89,11 @@ static int mv88e6171_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- /* @@@ initialise vtu and atu */
-
ret = mv88e6171_setup_global(ds);
if (ret < 0)
return ret;
- for (i = 0; i < ps->num_ports; i++) {
- if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)))
- continue;
-
- ret = mv88e6171_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int mv88e6171_get_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- if (ps->id == PORT_SWITCH_ID_6172)
- return mv88e6xxx_get_eee(ds, port, e);
-
- return -EOPNOTSUPP;
-}
-
-static int mv88e6171_set_eee(struct dsa_switch *ds, int port,
- struct phy_device *phydev, struct ethtool_eee *e)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- if (ps->id == PORT_SWITCH_ID_6172)
- return mv88e6xxx_set_eee(ds, port, phydev, e);
-
- return -EOPNOTSUPP;
+ return mv88e6xxx_setup_ports(ds);
}
struct dsa_switch_driver mv88e6171_switch_driver = {
@@ -292,8 +108,6 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
- .set_eee = mv88e6171_set_eee,
- .get_eee = mv88e6171_get_eee,
#ifdef CONFIG_NET_DSA_HWMON
.get_temp = mv88e6xxx_get_temp,
#endif
@@ -308,4 +122,6 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
};
MODULE_ALIAS("platform:mv88e6171");
-MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6175");
+MODULE_ALIAS("platform:mv88e6350");
+MODULE_ALIAS("platform:mv88e6351");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 126c11b81e75..632815c10a40 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -32,6 +32,8 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
+ return "Marvell 88E6172";
if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
return "Marvell 88E6176";
if (ret == PORT_SWITCH_ID_6352_A0)
@@ -47,187 +49,37 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
static int mv88e6352_setup_global(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u32 upstream_port = dsa_upstream_port(ds);
int ret;
- int i;
+ u32 reg;
+
+ ret = mv88e6xxx_setup_global(ds);
+ if (ret)
+ return ret;
/* Discard packets with excessive collisions,
* mask all interrupt sources, enable PPU (bit 14, undocumented).
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
-
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
- /* Configure the priority mapping registers. */
- ret = mv88e6xxx_config_prio(ds);
- if (ret < 0)
- return ret;
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+ GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
/* Configure the upstream port, and configure the upstream
* port as the port to which ingress and egress monitor frames
* are to be sent.
*/
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+ REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
/* Disable remote management for now, and set the switch's
* DSA device number.
*/
REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:2x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:0x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
- /* Disable the loopback filter, disable flow control
- * messages, disable flood broadcast override, disable
- * removing of provider tags, disable ATU age violation
- * interrupts, disable tag flow control, force flow
- * control priority to the highest, and send all special
- * multicast frames to the CPU at the highest priority.
- */
- REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
- /* Program the DSA routing table. */
- for (i = 0; i < 32; i++) {
- int nexthop = 0x1f;
-
- if (i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
-
- REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
- }
-
- /* Clear all trunk masks. */
- for (i = 0; i < 8; i++)
- REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f);
-
- /* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
- /* Disable ingress rate limiting by resetting all ingress
- * rate limit registers to their initial state.
- */
- for (i = 0; i < ps->num_ports; i++)
- REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
- /* Initialise cross-chip port VLAN table to reset defaults. */
- REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
- /* Clear the priority override table. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
- /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
-
return 0;
}
-static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
-{
- int addr = REG_PORT(p);
- u16 val;
-
- /* MAC Forcing register: don't force link, speed, duplex
- * or flow control state to any particular values on physical
- * ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
- */
- if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, 0x003e);
- else
- REG_WRITE(addr, 0x01, 0x0003);
-
- /* Do not limit the period of time that this port can be
- * paused for by the remote end or the period of time that
- * this port can pause the remote end.
- */
- REG_WRITE(addr, 0x02, 0x0000);
-
- /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
- * disable Header mode, enable IGMP/MLD snooping, disable VLAN
- * tunneling, determine priority by looking at 802.1p and IP
- * priority fields (IP prio has precedence), and set STP state
- * to Forwarding.
- *
- * If this is the CPU link, use DSA or EDSA tagging depending
- * on which tagging mode was configured.
- *
- * If this is a link to another switch, use DSA tagging mode.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown unicasts and multicasts.
- */
- val = 0x0433;
- if (dsa_is_cpu_port(ds, p)) {
- if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
- val |= 0x3300;
- else
- val |= 0x0100;
- }
- if (ds->dsa_port_mask & (1 << p))
- val |= 0x0100;
- if (p == dsa_upstream_port(ds))
- val |= 0x000c;
- REG_WRITE(addr, 0x04, val);
-
- /* Port Control 2: don't force a good FCS, set the maximum
- * frame size to 10240 bytes, don't let the switch add or
- * strip 802.1q tags, don't discard tagged or untagged frames
- * on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't
- * send a copy of all transmitted/received frames on this port
- * to the CPU.
- */
- REG_WRITE(addr, 0x08, 0x2080);
-
- /* Egress rate control: disable egress rate control. */
- REG_WRITE(addr, 0x09, 0x0001);
-
- /* Egress rate control 2: disable egress rate control. */
- REG_WRITE(addr, 0x0a, 0x0000);
-
- /* Port Association Vector: when learning source addresses
- * of packets, add the address to the address database using
- * a port bitmap that has only the bit for this port set and
- * the other bits clear.
- */
- REG_WRITE(addr, 0x0b, 1 << p);
-
- /* Port ATU control: disable limiting the number of address
- * database entries that this port is allowed to use.
- */
- REG_WRITE(addr, 0x0c, 0x0000);
-
- /* Priority Override: disable DA, SA and VTU priority override. */
- REG_WRITE(addr, 0x0d, 0x0000);
-
- /* Port Ethertype: use the Ethertype DSA Ethertype value. */
- REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
- /* Tag Remap: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x18, 0x3210);
-
- /* Tag Remap 2: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x19, 0x7654);
-
- return mv88e6xxx_setup_port_common(ds, p);
-}
-
#ifdef CONFIG_NET_DSA_HWMON
static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
@@ -292,7 +144,6 @@ static int mv88e6352_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
- int i;
ret = mv88e6xxx_setup_common(ds);
if (ret < 0)
@@ -306,19 +157,11 @@ static int mv88e6352_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- /* @@@ initialise vtu and atu */
-
ret = mv88e6352_setup_global(ds);
if (ret < 0)
return ret;
- for (i = 0; i < ps->num_ports; i++) {
- ret = mv88e6352_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return mv88e6xxx_setup_ports(ds);
}
static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
@@ -552,3 +395,4 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
};
MODULE_ALIAS("platform:mv88e6352");
+MODULE_ALIAS("platform:mv88e6172");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cf309aa92802..39530fa142b0 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -19,6 +19,34 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
+/* MDIO bus access can be nested in the case of PHYs connected to the
+ * internal MDIO bus of the switch, which is accessed via the MDIO bus
+ * of the Ethernet interface. Avoid lockdep false positives by using
+ * mutex_lock_nested().
+ */
+static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+{
+ int ret;
+
+ mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ ret = bus->read(bus, addr, regnum);
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
+ u16 val)
+{
+ int ret;
+
+ mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ ret = bus->write(bus, addr, regnum, val);
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
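A sketch of the nesting these wrappers legalise; the call chain below is reconstructed from the DSA code paths rather than quoted from the patch, so treat it as illustrative:

	/*
	 * phy_read(phydev)                       locks slave mii_bus->mdio_lock
	 *   -> DSA slave mii_bus ->read()
	 *     -> mv88e6xxx_phy_read()
	 *       -> __mv88e6xxx_reg_read(bus, ..)  on the parent Ethernet MDIO bus
	 *         -> mv88e6xxx_mdiobus_read()     locks parent bus->mdio_lock
	 *
	 * Both mutexes belong to the same lock class, so a plain mutex_lock()
	 * here would trip lockdep even though the nesting is deadlock-free;
	 * SINGLE_DEPTH_NESTING declares the one-level nesting as intentional.
	 */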
/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
* use all 32 SMI bus addresses on its SMI bus, and all switch registers
* will be directly accessible on some {device address,register address}
@@ -33,7 +61,7 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
int i;
for (i = 0; i < 16; i++) {
- ret = mdiobus_read(bus, sw_addr, SMI_CMD);
+ ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
if (ret < 0)
return ret;
@@ -49,7 +77,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
int ret;
if (sw_addr == 0)
- return mdiobus_read(bus, addr, reg);
+ return mv88e6xxx_mdiobus_read(bus, addr, reg);
/* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -57,8 +85,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
return ret;
/* Transmit the read command. */
- ret = mdiobus_write(bus, sw_addr, SMI_CMD,
- SMI_CMD_OP_22_READ | (addr << 5) | reg);
+ ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_READ | (addr << 5) | reg);
if (ret < 0)
return ret;
@@ -68,7 +96,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
return ret;
/* Read the data. */
- ret = mdiobus_read(bus, sw_addr, SMI_DATA);
+ ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
if (ret < 0)
return ret;
@@ -112,7 +140,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
int ret;
if (sw_addr == 0)
- return mdiobus_write(bus, addr, reg, val);
+ return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
/* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -120,13 +148,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
return ret;
/* Transmit the data to write. */
- ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
+ ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
if (ret < 0)
return ret;
/* Transmit the write command. */
- ret = mdiobus_write(bus, sw_addr, SMI_CMD,
- SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
+ ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
if (ret < 0)
return ret;
@@ -165,24 +193,6 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
return ret;
}
-int mv88e6xxx_config_prio(struct dsa_switch *ds)
-{
- /* Configure the IP ToS mapping registers. */
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
-
- /* Configure the IEEE 802.1p priority mapping register. */
- REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
-
- return 0;
-}
-
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
@@ -217,20 +227,20 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
return 0;
}
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
if (addr >= 0)
- return mv88e6xxx_reg_read(ds, addr, regnum);
+ return _mv88e6xxx_reg_read(ds, addr, regnum);
return 0xffff;
}
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
u16 val)
{
if (addr >= 0)
- return mv88e6xxx_reg_write(ds, addr, regnum, val);
+ return _mv88e6xxx_reg_write(ds, addr, regnum, val);
return 0;
}
@@ -434,26 +444,113 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
}
}
+static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6031:
+ case PORT_SWITCH_ID_6061:
+ case PORT_SWITCH_ID_6035:
+ case PORT_SWITCH_ID_6065:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6092:
+ case PORT_SWITCH_ID_6095:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6046:
+ case PORT_SWITCH_ID_6085:
+ case PORT_SWITCH_ID_6096:
+ case PORT_SWITCH_ID_6097:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6123:
+ case PORT_SWITCH_ID_6161:
+ case PORT_SWITCH_ID_6165:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6121:
+ case PORT_SWITCH_ID_6122:
+ case PORT_SWITCH_ID_6152:
+ case PORT_SWITCH_ID_6155:
+ case PORT_SWITCH_ID_6182:
+ case PORT_SWITCH_ID_6185:
+ case PORT_SWITCH_ID_6108:
+ case PORT_SWITCH_ID_6131:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6171:
+ case PORT_SWITCH_ID_6175:
+ case PORT_SWITCH_ID_6350:
+ case PORT_SWITCH_ID_6351:
+ return true;
+ }
+ return false;
+}
+
static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
switch (ps->id) {
- case PORT_SWITCH_ID_6352:
case PORT_SWITCH_ID_6172:
case PORT_SWITCH_ID_6176:
+ case PORT_SWITCH_ID_6240:
+ case PORT_SWITCH_ID_6352:
return true;
}
return false;
}
-static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
int ret;
int i;
for (i = 0; i < 10; i++) {
- ret = REG_READ(REG_GLOBAL, GLOBAL_STATS_OP);
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
return 0;
}
@@ -461,7 +558,8 @@ static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
return -ETIMEDOUT;
}
-static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
int ret;
@@ -469,42 +567,45 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
- REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_CAPTURE_PORT |
- GLOBAL_STATS_OP_HIST_RX_TX | port);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_CAPTURE_PORT |
+ GLOBAL_STATS_OP_HIST_RX_TX | port);
+ if (ret < 0)
+ return ret;
/* Wait for the snapshotting to complete. */
- ret = mv88e6xxx_stats_wait(ds);
+ ret = _mv88e6xxx_stats_wait(ds);
if (ret < 0)
return ret;
return 0;
}
-static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+/* Must be called with SMI mutex held */
+static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
{
u32 _val;
int ret;
*val = 0;
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_READ_CAPTURED |
- GLOBAL_STATS_OP_HIST_RX_TX | stat);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_READ_CAPTURED |
+ GLOBAL_STATS_OP_HIST_RX_TX | stat);
if (ret < 0)
return;
- ret = mv88e6xxx_stats_wait(ds);
+ ret = _mv88e6xxx_stats_wait(ds);
if (ret < 0)
return;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
if (ret < 0)
return;
_val = ret << 16;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
if (ret < 0)
return;
@@ -587,11 +688,11 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int ret;
int i;
- mutex_lock(&ps->stats_mutex);
+ mutex_lock(&ps->smi_mutex);
- ret = mv88e6xxx_stats_snapshot(ds, port);
+ ret = _mv88e6xxx_stats_snapshot(ds, port);
if (ret < 0) {
- mutex_unlock(&ps->stats_mutex);
+ mutex_unlock(&ps->smi_mutex);
return;
}
@@ -602,14 +703,14 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
u32 high = 0;
if (s->reg >= 0x100) {
- ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
- s->reg - 0x100);
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+ s->reg - 0x100);
if (ret < 0)
goto error;
low = ret;
if (s->sizeof_stat == 4) {
- ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
- s->reg - 0x100 + 1);
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+ s->reg - 0x100 + 1);
if (ret < 0)
goto error;
high = ret;
@@ -617,14 +718,14 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
data[i] = (((u64)high) << 16) | low;
continue;
}
- mv88e6xxx_stats_read(ds, s->reg, &low);
+ _mv88e6xxx_stats_read(ds, s->reg, &low);
if (s->sizeof_stat == 8)
- mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+ _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
data[i] = (((u64)high) << 32) | low;
}
error:
- mutex_unlock(&ps->stats_mutex);
+ mutex_unlock(&ps->smi_mutex);
}
/* All the statistics in the table */
@@ -694,7 +795,7 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
*temp = 0;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
if (ret < 0)
@@ -727,19 +828,23 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
error:
_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
#endif /* CONFIG_NET_DSA_HWMON */
-static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+/* Must be called with SMI lock held */
+static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
+ u16 mask)
{
unsigned long timeout = jiffies + HZ / 10;
while (time_before(jiffies, timeout)) {
int ret;
- ret = REG_READ(reg, offset);
+ ret = _mv88e6xxx_reg_read(ds, reg, offset);
+ if (ret < 0)
+ return ret;
if (!(ret & mask))
return 0;
@@ -748,10 +853,22 @@ static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
return -ETIMEDOUT;
}
-int mv88e6xxx_phy_wait(struct dsa_switch *ds)
+static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = _mv88e6xxx_wait(ds, reg, offset, mask);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
{
- return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
- GLOBAL2_SMI_OP_BUSY);
+ return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_BUSY);
}
int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
@@ -767,56 +884,46 @@ int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
}
/* Must be called with SMI lock held */
-static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
-{
- unsigned long timeout = jiffies + HZ / 10;
-
- while (time_before(jiffies, timeout)) {
- int ret;
-
- ret = _mv88e6xxx_reg_read(ds, reg, offset);
- if (ret < 0)
- return ret;
- if (!(ret & mask))
- return 0;
-
- usleep_range(1000, 2000);
- }
- return -ETIMEDOUT;
-}
-
-/* Must be called with SMI lock held */
static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
{
return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
GLOBAL_ATU_OP_BUSY);
}
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
int regnum)
{
int ret;
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
- GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_22_READ | (addr << 5) |
+ regnum);
+ if (ret < 0)
+ return ret;
- ret = mv88e6xxx_phy_wait(ds);
+ ret = _mv88e6xxx_phy_wait(ds);
if (ret < 0)
return ret;
- return REG_READ(REG_GLOBAL2, GLOBAL2_SMI_DATA);
+ return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
}
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
int regnum, u16 val)
{
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
- GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum);
+ int ret;
- return mv88e6xxx_phy_wait(ds);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
+ regnum);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_phy_wait(ds);
}
int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
@@ -824,7 +931,7 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int reg;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
if (reg < 0)
@@ -833,7 +940,7 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
e->eee_enabled = !!(reg & 0x0200);
e->tx_lpi_enabled = !!(reg & 0x0100);
- reg = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+ reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
if (reg < 0)
goto out;
@@ -841,7 +948,7 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
reg = 0;
out:
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return reg;
}
@@ -852,7 +959,7 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
int reg;
int ret;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
if (ret < 0)
@@ -866,7 +973,7 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
out:
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1241,13 +1348,212 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
}
}
-int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
+static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret, fid;
+ u16 reg;
mutex_lock(&ps->smi_mutex);
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+ mv88e6xxx_6065_family(ds)) {
+ /* MAC Forcing register: don't force link, speed,
+ * duplex or flow control state to any particular
+ * values on physical ports, but force the CPU port
+ * and all DSA ports to their maximum bandwidth and
+ * full duplex.
+ */
+ reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+ if (dsa_is_cpu_port(ds, port) ||
+ ds->dsa_port_mask & (1 << port)) {
+ reg |= PORT_PCS_CTRL_FORCE_LINK |
+ PORT_PCS_CTRL_LINK_UP |
+ PORT_PCS_CTRL_DUPLEX_FULL |
+ PORT_PCS_CTRL_FORCE_DUPLEX;
+ if (mv88e6xxx_6065_family(ds))
+ reg |= PORT_PCS_CTRL_100;
+ else
+ reg |= PORT_PCS_CTRL_1000;
+ } else {
+ reg |= PORT_PCS_CTRL_UNFORCED;
+ }
+
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_PCS_CTRL, reg);
+ if (ret)
+ goto abort;
+ }
+
+ /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and IP
+ * priority fields (IP prio has precedence), and set STP state
+ * to Forwarding.
+ *
+ * If this is the CPU link, use DSA or EDSA tagging depending
+ * on which tagging mode was configured.
+ *
+ * If this is a link to another switch, use DSA tagging mode.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts and multicasts.
+ */
+ reg = 0;
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+ mv88e6xxx_6185_family(ds))
+ reg = PORT_CONTROL_IGMP_MLD_SNOOP |
+ PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
+ PORT_CONTROL_STATE_FORWARDING;
+ if (dsa_is_cpu_port(ds, port)) {
+ if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+ reg |= PORT_CONTROL_DSA_TAG;
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+ reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
+ else
+ reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ }
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+ mv88e6xxx_6185_family(ds)) {
+ if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+ reg |= PORT_CONTROL_EGRESS_ADD_TAG;
+ }
+ }
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
+ if (ds->dsa_port_mask & (1 << port))
+ reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ if (port == dsa_upstream_port(ds))
+ reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+ PORT_CONTROL_FORWARD_UNKNOWN_MC;
+ }
+ if (reg) {
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_CONTROL, reg);
+ if (ret)
+ goto abort;
+ }
+
+ /* Port Control 2: don't force a good FCS, set the maximum
+ * frame size to 10240 bytes, don't let the switch add or
+ * strip 802.1q tags, don't discard tagged or untagged frames
+ * on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
+ */
+ reg = 0;
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6095_family(ds))
+ reg = PORT_CONTROL_2_MAP_DA;
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds))
+ reg |= PORT_CONTROL_2_JUMBO_10240;
+
+ if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
+ /* Set the upstream port this port should use */
+ reg |= dsa_upstream_port(ds);
+ /* enable forwarding of unknown multicast addresses to
+ * the upstream port
+ */
+ if (port == dsa_upstream_port(ds))
+ reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+ }
+
+ if (reg) {
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_CONTROL_2, reg);
+ if (ret)
+ goto abort;
+ }
+
+ /* Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
+ 1 << port);
+ if (ret)
+ goto abort;
+
+ /* Egress rate control 2: disable egress rate control. */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
+ 0x0000);
+ if (ret)
+ goto abort;
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ /* Do not limit the period of time that this port can
+ * be paused for by the remote end or the period of
+ * time that this port can pause the remote end.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_PAUSE_CTRL, 0x0000);
+ if (ret)
+ goto abort;
+
+ /* Port ATU control: disable limiting the number of
+ * address database entries that this port is allowed
+ * to use.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_ATU_CONTROL, 0x0000);
+ if (ret)
+ goto abort;
+
+ /* Priority Override: disable DA, SA and VTU priority
+ * override.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_PRI_OVERRIDE, 0x0000);
+ if (ret)
+ goto abort;
+
+ /* Port Ethertype: use the Ethertype DSA Ethertype
+ * value.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_ETH_TYPE, ETH_P_EDSA);
+ if (ret)
+ goto abort;
+ /* Tag Remap: use an identity 802.1p prio -> switch
+ * prio mapping.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_TAG_REGMAP_0123, 0x3210);
+ if (ret)
+ goto abort;
+
+ /* Tag Remap 2: use an identity 802.1p prio -> switch
+ * prio mapping.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_TAG_REGMAP_4567, 0x7654);
+ if (ret)
+ goto abort;
+ }
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+ /* Rate Control: disable ingress rate limiting. */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_RATE_CONTROL, 0x0001);
+ if (ret)
+ goto abort;
+ }
+
/* Port Control 1: disable trunking, disable sending
* learning messages to this port.
*/
@@ -1281,13 +1587,25 @@ abort:
return ret;
}
+int mv88e6xxx_setup_ports(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+ int i;
+
+ for (i = 0; i < ps->num_ports; i++) {
+ ret = mv88e6xxx_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
int mv88e6xxx_setup_common(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
mutex_init(&ps->smi_mutex);
- mutex_init(&ps->stats_mutex);
- mutex_init(&ps->phy_mutex);
ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
@@ -1298,6 +1616,104 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
return 0;
}
+int mv88e6xxx_setup_global(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int i;
+
+ /* Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+
+ /* Configure the IP ToS mapping registers. */
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+
+ /* Configure the IEEE 802.1p priority mapping register. */
+ REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+
+ /* Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+
+ /* Ignore removed tag data on doubly tagged packets, disable
+ * flow control messages, force flow control priority to the
+ * highest, and send all special multicast frames to the CPU
+ * port at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
+ 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
+ GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+
+ /* Program the DSA routing table. */
+ for (i = 0; i < 32; i++) {
+ int nexthop = 0x1f;
+
+ if (ds->pd->rtable &&
+ i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
+ GLOBAL2_DEVICE_MAPPING_UPDATE |
+ (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
+ nexthop);
+ }
+
+ /* Clear all trunk masks. */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
+ 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
+ ((1 << ps->num_ports) - 1));
+
+ /* Clear all trunk mappings. */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
+ GLOBAL2_TRUNK_MAPPING_UPDATE |
+ (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ /* Send all frames with destination addresses matching
+ * 01:80:c2:00:00:2x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
+
+ /* Initialise cross-chip port VLAN table to reset
+ * defaults.
+ */
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
+
+ /* Clear the priority override table. */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
+ 0x8000 | (i << 8));
+ }
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+ /* Disable ingress rate limiting by resetting all
+ * ingress rate limit registers to their initial
+ * state.
+ */
+ for (i = 0; i < ps->num_ports; i++)
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
+ 0x9000 | (i << 8));
+ }
+
+ return 0;
+}
+
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -1343,14 +1759,14 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
if (ret < 0)
goto error;
ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
error:
_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1360,7 +1776,7 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
if (ret < 0)
goto error;
@@ -1368,7 +1784,7 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
error:
_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1391,9 +1807,9 @@ mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
if (addr < 0)
return addr;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_read(ds, addr, regnum);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1407,9 +1823,9 @@ mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
if (addr < 0)
return addr;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1423,9 +1839,9 @@ mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
if (addr < 0)
return addr;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1440,9 +1856,9 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
if (addr < 0)
return addr;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index e045154f3364..e10ccdb4ffbc 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -40,9 +40,31 @@
#define PORT_STATUS_TX_PAUSED BIT(5)
#define PORT_STATUS_FLOW_CTRL BIT(4)
#define PORT_PCS_CTRL 0x01
+#define PORT_PCS_CTRL_FC BIT(7)
+#define PORT_PCS_CTRL_FORCE_FC BIT(6)
+#define PORT_PCS_CTRL_LINK_UP BIT(5)
+#define PORT_PCS_CTRL_FORCE_LINK BIT(4)
+#define PORT_PCS_CTRL_DUPLEX_FULL BIT(3)
+#define PORT_PCS_CTRL_FORCE_DUPLEX BIT(2)
+#define PORT_PCS_CTRL_10 0x00
+#define PORT_PCS_CTRL_100 0x01
+#define PORT_PCS_CTRL_1000 0x02
+#define PORT_PCS_CTRL_UNFORCED 0x03
+#define PORT_PAUSE_CTRL 0x02
#define PORT_SWITCH_ID 0x03
+#define PORT_SWITCH_ID_6031 0x0310
+#define PORT_SWITCH_ID_6035 0x0350
+#define PORT_SWITCH_ID_6046 0x0480
+#define PORT_SWITCH_ID_6061 0x0610
+#define PORT_SWITCH_ID_6065 0x0650
#define PORT_SWITCH_ID_6085 0x04a0
+#define PORT_SWITCH_ID_6092 0x0970
#define PORT_SWITCH_ID_6095 0x0950
+#define PORT_SWITCH_ID_6096 0x0980
+#define PORT_SWITCH_ID_6097 0x0990
+#define PORT_SWITCH_ID_6108 0x1070
+#define PORT_SWITCH_ID_6121 0x1040
+#define PORT_SWITCH_ID_6122 0x1050
#define PORT_SWITCH_ID_6123 0x1210
#define PORT_SWITCH_ID_6123_A1 0x1212
#define PORT_SWITCH_ID_6123_A2 0x1213
@@ -58,13 +80,38 @@
#define PORT_SWITCH_ID_6165_A2 0x1653
#define PORT_SWITCH_ID_6171 0x1710
#define PORT_SWITCH_ID_6172 0x1720
+#define PORT_SWITCH_ID_6175 0x1750
#define PORT_SWITCH_ID_6176 0x1760
#define PORT_SWITCH_ID_6182 0x1a60
#define PORT_SWITCH_ID_6185 0x1a70
+#define PORT_SWITCH_ID_6240 0x2400
+#define PORT_SWITCH_ID_6320 0x1250
+#define PORT_SWITCH_ID_6350 0x3710
+#define PORT_SWITCH_ID_6351 0x3750
#define PORT_SWITCH_ID_6352 0x3520
#define PORT_SWITCH_ID_6352_A0 0x3521
#define PORT_SWITCH_ID_6352_A1 0x3522
#define PORT_CONTROL 0x04
+#define PORT_CONTROL_USE_CORE_TAG BIT(15)
+#define PORT_CONTROL_DROP_ON_LOCK BIT(14)
+#define PORT_CONTROL_EGRESS_UNMODIFIED (0x0 << 12)
+#define PORT_CONTROL_EGRESS_UNTAGGED (0x1 << 12)
+#define PORT_CONTROL_EGRESS_TAGGED (0x2 << 12)
+#define PORT_CONTROL_EGRESS_ADD_TAG (0x3 << 12)
+#define PORT_CONTROL_HEADER BIT(11)
+#define PORT_CONTROL_IGMP_MLD_SNOOP BIT(10)
+#define PORT_CONTROL_DOUBLE_TAG BIT(9)
+#define PORT_CONTROL_FRAME_MODE_NORMAL (0x0 << 8)
+#define PORT_CONTROL_FRAME_MODE_DSA (0x1 << 8)
+#define PORT_CONTROL_FRAME_MODE_PROVIDER (0x2 << 8)
+#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA (0x3 << 8)
+#define PORT_CONTROL_DSA_TAG BIT(8)
+#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
+#define PORT_CONTROL_TAG_IF_BOTH BIT(6)
+#define PORT_CONTROL_USE_IP BIT(5)
+#define PORT_CONTROL_USE_TAG BIT(4)
+#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3)
+#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2)
#define PORT_CONTROL_STATE_MASK 0x03
#define PORT_CONTROL_STATE_DISABLED 0x00
#define PORT_CONTROL_STATE_BLOCKING 0x01
@@ -74,15 +121,32 @@
#define PORT_BASE_VLAN 0x06
#define PORT_DEFAULT_VLAN 0x07
#define PORT_CONTROL_2 0x08
+#define PORT_CONTROL_2_IGNORE_FCS BIT(15)
+#define PORT_CONTROL_2_VTU_PRI_OVERRIDE BIT(14)
+#define PORT_CONTROL_2_SA_PRIO_OVERRIDE BIT(13)
+#define PORT_CONTROL_2_DA_PRIO_OVERRIDE BIT(12)
+#define PORT_CONTROL_2_JUMBO_1522 (0x00 << 12)
+#define PORT_CONTROL_2_JUMBO_2048 (0x01 << 12)
+#define PORT_CONTROL_2_JUMBO_10240 (0x02 << 12)
+#define PORT_CONTROL_2_DISCARD_TAGGED BIT(9)
+#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8)
+#define PORT_CONTROL_2_MAP_DA BIT(7)
+#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6)
+#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
+#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5)
+#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
#define PORT_RATE_CONTROL 0x09
#define PORT_RATE_CONTROL_2 0x0a
#define PORT_ASSOC_VECTOR 0x0b
+#define PORT_ATU_CONTROL 0x0c
+#define PORT_PRI_OVERRIDE 0x0d
+#define PORT_ETH_TYPE 0x0f
#define PORT_IN_DISCARD_LO 0x10
#define PORT_IN_DISCARD_HI 0x11
#define PORT_IN_FILTERED 0x12
#define PORT_OUT_FILTERED 0x13
-#define PORT_TAG_REGMAP_0123 0x19
-#define PORT_TAG_REGMAP_4567 0x1a
+#define PORT_TAG_REGMAP_0123 0x18
+#define PORT_TAG_REGMAP_4567 0x19
#define REG_GLOBAL 0x1b
#define GLOBAL_STATUS 0x00
@@ -102,7 +166,7 @@
#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) /* 6352 */
#define GLOBAL_CONTROL_SCHED_PRIO BIT(11) /* 6152 */
#define GLOBAL_CONTROL_MAX_FRAME_1632 BIT(10) /* 6152 */
-#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */
+#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */
#define GLOBAL_CONTROL_DEVICE_EN BIT(7)
#define GLOBAL_CONTROL_STATS_DONE_EN BIT(6)
#define GLOBAL_CONTROL_VTU_PROBLEM_EN BIT(5)
@@ -117,6 +181,7 @@
#define GLOBAL_VTU_DATA_4_7 0x08
#define GLOBAL_VTU_DATA_8_11 0x09
#define GLOBAL_ATU_CONTROL 0x0a
+#define GLOBAL_ATU_CONTROL_LEARN2ALL BIT(3)
#define GLOBAL_ATU_OP 0x0b
#define GLOBAL_ATU_OP_BUSY BIT(15)
#define GLOBAL_ATU_OP_NOP (0 << 12)
@@ -151,7 +216,15 @@
#define GLOBAL_IEEE_PRI 0x18
#define GLOBAL_CORE_TAG_TYPE 0x19
#define GLOBAL_MONITOR_CONTROL 0x1a
+#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT 12
+#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT 8
+#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT 4
+#define GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT 0
+#define GLOBAL_MONITOR_CONTROL_ARP_DISABLED (0xf0)
#define GLOBAL_CONTROL_2 0x1c
+#define GLOBAL_CONTROL_2_NO_CASCADE 0xe000
+#define GLOBAL_CONTROL_2_MULTIPLE_CASCADE 0xf000
+
#define GLOBAL_STATS_OP 0x1d
#define GLOBAL_STATS_OP_BUSY BIT(15)
#define GLOBAL_STATS_OP_NOP (0 << 12)
@@ -172,9 +245,20 @@
#define GLOBAL2_MGMT_EN_0X 0x03
#define GLOBAL2_FLOW_CONTROL 0x04
#define GLOBAL2_SWITCH_MGMT 0x05
+#define GLOBAL2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA BIT(15)
+#define GLOBAL2_SWITCH_MGMT_PREVENT_LOOPS BIT(14)
+#define GLOBAL2_SWITCH_MGMT_FLOW_CONTROL_MSG BIT(13)
+#define GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI BIT(7)
+#define GLOBAL2_SWITCH_MGMT_RSVD2CPU BIT(3)
#define GLOBAL2_DEVICE_MAPPING 0x06
+#define GLOBAL2_DEVICE_MAPPING_UPDATE BIT(15)
+#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT 8
#define GLOBAL2_TRUNK_MASK 0x07
+#define GLOBAL2_TRUNK_MASK_UPDATE BIT(15)
+#define GLOBAL2_TRUNK_MASK_NUM_SHIFT 12
#define GLOBAL2_TRUNK_MAPPING 0x08
+#define GLOBAL2_TRUNK_MAPPING_UPDATE BIT(15)
+#define GLOBAL2_TRUNK_MAPPING_ID_SHIFT 11
#define GLOBAL2_INGRESS_OP 0x09
#define GLOBAL2_INGRESS_DATA 0x0a
#define GLOBAL2_PVT_ADDR 0x0b
@@ -183,6 +267,10 @@
#define GLOBAL2_SWITCH_MAC_BUSY BIT(15)
#define GLOBAL2_ATU_STATS 0x0e
#define GLOBAL2_PRIO_OVERRIDE 0x0f
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP BIT(7)
+#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT 4
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3)
+#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0
#define GLOBAL2_EEPROM_OP 0x14
#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
@@ -260,14 +348,14 @@ struct mv88e6xxx_hw_stat {
};
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
-int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port);
+int mv88e6xxx_setup_ports(struct dsa_switch *ds);
int mv88e6xxx_setup_common(struct dsa_switch *ds);
+int mv88e6xxx_setup_global(struct dsa_switch *ds);
int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
int reg, u16 val);
int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
-int mv88e6xxx_config_prio(struct dsa_switch *ds);
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
@@ -289,7 +377,6 @@ int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p);
int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_phy_wait(struct dsa_switch *ds);
int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
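
The header hunks above name the individual PORT_CONTROL and PORT_CONTROL_2 fields that the .c files previously wrote as bare magic numbers. Illustrative only (this is not the driver's actual CPU-port setup), a forwarding-state port value might now be composed as:

	u16 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
		  PORT_CONTROL_USE_TAG |
		  PORT_CONTROL_USE_IP |
		  PORT_CONTROL_STATE_FORWARDING;

which reads considerably better in a diff than the equivalent bare hex constant.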
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index eadcb053807e..9a8308553520 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -34,6 +34,7 @@ source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 1367afcd0a8b..4395d99115a0 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 426916036151..acd53173fcc0 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,10 +179,8 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
+ depends on ((OF_NET && OF_ADDRESS) || ACPI) && HAS_IOMEM && HAS_DMA
depends on ARM64 || COMPILE_TEST
- select PHYLIB
- select AMD_XGBE_PHY
select BITREVERSE
select CRC32
select PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 34c28aac767f..b6fa89102526 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -857,6 +857,48 @@
*/
#define PCS_MMD_SELECT 0xff
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1 0x002c
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX 11
+#define SIR0_KR_RT_1_RESET_WIDTH 1
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_CDR_RATE_INDEX 12
+#define SIR1_SPEED_CDR_RATE_WIDTH 4
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
+#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
/* Descriptor/Packet entry bit positions and sizes */
#define RX_PACKET_ERRORS_CRC_INDEX 2
#define RX_PACKET_ERRORS_CRC_WIDTH 1
@@ -973,10 +1015,47 @@
#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL 0x00ab
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
#ifndef MDIO_AN_COMP_STAT
#define MDIO_AN_COMP_STAT 0x0030
#endif
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+/* MDIO mask values */
+#define XGBE_XNP_MCF_NULL_MESSAGE 0x001
+#define XGBE_XNP_ACK_PROCESSED BIT(12)
+#define XGBE_XNP_MP_FORMATTED BIT(13)
+#define XGBE_XNP_NP_EXCHANGE BIT(15)
+
+#define XGBE_KR_TRAINING_START BIT(0)
+#define XGBE_KR_TRAINING_ENABLE BIT(1)
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@@ -1119,6 +1198,82 @@ do { \
ioread32((_pdata)->xpcs_regs + (_off))
/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
* using MDIO. Different from above because of the use of standardized
* Linux include values. No shifting is performed with the bit
* operations, everything works on mask values.
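
For readers new to the _INDEX/_WIDTH convention these hunks extend: each *_IOWRITE_BITS macro is a 16-bit read-modify-write built from the field position and width pairs defined above. Expanding one call by hand, XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, 1) becomes roughly:

	u16 reg_val = ioread16(pdata->rxtx_regs + RXTX_REG20);
	SET_BITS(reg_val, RXTX_REG20_BLWC_ENA_INDEX,	/* bit 2 */
		 RXTX_REG20_BLWC_ENA_WIDTH, 1);		/* 1-bit field */
	iowrite16(reg_val, pdata->rxtx_regs + RXTX_REG20);

so only the named field changes while the rest of the register is preserved.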
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 8a50b01c2686..a6b9899e285f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -150,9 +150,12 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
tc_ets = 0;
tc_ets_weight = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
- ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
- DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+ netif_dbg(pdata, drv, netdev,
+ "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+ ets->tc_tx_bw[i], ets->tc_rx_bw[i],
+ ets->tc_tsa[i]);
+ netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
+ ets->prio_tc[i]);
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
(i >= pdata->hw_feat.tc_cnt))
@@ -214,8 +217,9 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
- pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+ netif_dbg(pdata, drv, netdev,
+ "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+ pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
if (!pdata->pfc) {
pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
@@ -238,9 +242,10 @@ static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
u8 support = xgbe_dcb_getdcbx(netdev);
- DBGPR(" DCBX=%#hhx\n", dcbx);
+ netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
if (dcbx & ~support)
return 1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index d81fc6bd4759..dd03ad865caf 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -208,8 +208,9 @@ static int xgbe_init_ring(struct xgbe_prv_data *pdata,
if (!ring->rdata)
return -ENOMEM;
- DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
- ring->rdesc, ring->rdesc_dma, ring->rdata);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
+ ring->rdesc, &ring->rdesc_dma, ring->rdata);
DBGPR("<--xgbe_init_ring\n");
@@ -226,7 +227,9 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
- DBGPR(" %s - tx_ring:\n", channel->name);
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+ channel->name);
+
ret = xgbe_init_ring(pdata, channel->tx_ring,
pdata->tx_desc_count);
if (ret) {
@@ -235,12 +238,14 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
goto err_ring;
}
- DBGPR(" %s - rx_ring:\n", channel->name);
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+ channel->name);
+
ret = xgbe_init_ring(pdata, channel->rx_ring,
pdata->rx_desc_count);
if (ret) {
netdev_alert(pdata->netdev,
- "error initializing Tx ring\n");
+ "error initializing Rx ring\n");
goto err_ring;
}
}
@@ -476,8 +481,6 @@ static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
if (rdata->state_saved) {
rdata->state_saved = 0;
- rdata->state.incomplete = 0;
- rdata->state.context_next = 0;
rdata->state.skb = NULL;
rdata->state.len = 0;
rdata->state.error = 0;
@@ -518,8 +521,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
rdata = XGBE_GET_DESC_DATA(ring, cur_index);
if (tso) {
- DBGPR(" TSO packet\n");
-
/* Map the TSO header */
skb_dma = dma_map_single(pdata->dev, skb->data,
packet->header_len, DMA_TO_DEVICE);
@@ -529,6 +530,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = packet->header_len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb header: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, packet->header_len);
offset = packet->header_len;
@@ -550,8 +554,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = len;
- DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
- cur_index, skb_dma, len);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb data: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
datalen -= len;
offset += len;
@@ -563,7 +568,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- DBGPR(" mapping frag %u\n", i);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "mapping frag %u\n", i);
frag = &skb_shinfo(skb)->frags[i];
offset = 0;
@@ -582,8 +588,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = len;
rdata->mapped_as_page = 1;
- DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
- cur_index, skb_dma, len);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb frag: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
datalen -= len;
offset += len;
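
The logging conversions in this file also switch DMA addresses from a hand-rolled "0x%llx" to the %pad format specifier. %pad takes a pointer to a dma_addr_t and prints it at its native width, which avoids a format-string mismatch on configurations where dma_addr_t is 32 bits. A minimal sketch of the pattern:

	dma_addr_t skb_dma = rdata->skb_dma;

	/* note the &: %pad consumes a dma_addr_t pointer, not the value */
	netif_dbg(pdata, tx_queued, pdata->netdev, "dma=%pad\n", &skb_dma);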
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 21d9497518fd..506e832c9e9a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
return 0;
- DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
+ netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+ enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
return 0;
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
return 0;
- DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
+ netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+ enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
mac_addr[0] = ha->addr[4];
mac_addr[1] = ha->addr[5];
- DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
- *mac_reg);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "adding mac address %pM at %#x\n",
+ ha->addr, *mac_reg);
XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
}
@@ -907,23 +910,6 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
- /* If the PCS is changing modes, match the MAC speed to it */
- if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
- ((mmd_address & 0xffff) == MDIO_CTRL2)) {
- struct phy_device *phydev = pdata->phydev;
-
- if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
- /* KX mode */
- if (phydev->supported & SUPPORTED_1000baseKX_Full)
- xgbe_set_gmii_speed(pdata);
- else
- xgbe_set_gmii_2500_speed(pdata);
- } else {
- /* KR mode */
- xgbe_set_xgmii_speed(pdata);
- }
- }
-
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
@@ -1322,7 +1308,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
- DBGPR(" TC%u using SP\n", i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using SP\n", i);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
MTL_TSA_SP);
break;
@@ -1330,7 +1317,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
weight = total_weight * ets->tc_tx_bw[i] / 100;
weight = clamp(weight, min_weight, total_weight);
- DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using DWRR (weight %u)\n", i, weight);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
MTL_TSA_ETS);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1347,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
}
mask &= 0xff;
- DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
+ netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+ tc, mask);
reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -1457,8 +1446,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
/* Create a context descriptor if this is a TSO packet */
if (tso_context || vlan_context) {
if (tso_context) {
- DBGPR(" TSO context descriptor, mss=%u\n",
- packet->mss);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "TSO context descriptor, mss=%u\n",
+ packet->mss);
/* Set the MSS size */
XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1466,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
}
if (vlan_context) {
- DBGPR(" VLAN context descriptor, ctag=%u\n",
- packet->vlan_ctag);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "VLAN context descriptor, ctag=%u\n",
+ packet->vlan_ctag);
/* Mark it as a CONTEXT descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1533,6 +1524,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
packet->tcp_payload_len);
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
packet->tcp_header_len / 4);
+
+ pdata->ext_stats.tx_tso_packets++;
} else {
/* Enable CRC and Pad Insertion */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
@@ -1594,9 +1587,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
rdesc = rdata->rdesc;
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
- xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+ if (netif_msg_tx_queued(pdata))
+ xgbe_dump_tx_desc(pdata, ring, start_index,
+ packet->rdesc_count, 1);
/* Make sure ownership is written to the descriptor */
dma_wmb();
@@ -1618,11 +1611,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
static int xgbe_dev_read(struct xgbe_channel *channel)
{
+ struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
- struct net_device *netdev = channel->pdata->netdev;
+ struct net_device *netdev = pdata->netdev;
unsigned int err, etlt, l34t;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1637,9 +1631,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
/* Make sure descriptor fields are read after reading the OWN bit */
dma_rmb();
-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
- xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+ if (netif_msg_rx_status(pdata))
+ xgbe_dump_rx_desc(pdata, ring, ring->cur);
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
/* Timestamp Context Descriptor */
@@ -1661,9 +1654,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
CONTEXT_NEXT, 1);
/* Get the header length */
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL);
+ if (rdata->rx.hdr_len)
+ pdata->ext_stats.rx_split_header_packets++;
+ }
/* Get the RSS hash */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
@@ -1700,14 +1696,14 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
INCOMPLETE, 0);
/* Set checksum done indicator as appropriate */
- if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+ if (netdev->features & NETIF_F_RXCSUM)
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CSUM_DONE, 1);
/* Check for errors (only valid in last descriptor) */
err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
- DBGPR(" err=%u, etlt=%#x\n", err, etlt);
+ netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
if (!err || !etlt) {
/* No error if err is 0 or etlt is 0 */
@@ -1718,7 +1714,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
RX_NORMAL_DESC0,
OVT);
- DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
+ netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+ packet->vlan_ctag);
}
} else {
if ((etlt == 0x05) || (etlt == 0x06))
@@ -2026,9 +2023,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
- netdev_notice(pdata->netdev,
- "%d Tx hardware queues, %d byte fifo per queue\n",
- pdata->tx_q_count, ((fifo_size + 1) * 256));
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2042,9 +2039,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
- netdev_notice(pdata->netdev,
- "%d Rx hardware queues, %d byte fifo per queue\n",
- pdata->rx_q_count, ((fifo_size + 1) * 256));
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Rx hardware queues, %d byte fifo per queue\n",
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2063,14 +2060,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
for (j = 0; j < qptc; j++) {
- DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
}
if (i < qptc_extra) {
- DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
@@ -2088,13 +2087,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, prio = 0; i < prio_queues;) {
mask = 0;
for (j = 0; j < ppq; j++) {
- DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
if (i < ppq_extra) {
- DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
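
The queue-mapping loop in xgbe_config_queue_mapping() spreads Tx queues across traffic classes with a quotient/remainder split; a worked example (hypothetical counts, not taken from this patch):

	/* tx_q_count = 8, hw_feat.tc_cnt = 3:
	 *   qptc       = 8 / 3 = 2 queues per TC
	 *   qptc_extra = 8 % 3 = 2 leftover queues
	 * TCs 0 and 1 each absorb one extra queue, so the netif_dbg
	 * lines above would report:
	 *   TXq0-TXq2 mapped to TC0, TXq3-TXq5 to TC1, TXq6-TXq7 to TC2
	 */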
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 9fd6c69a8bac..1e9c28d19ef8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -183,9 +183,10 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
channel->rx_ring = rx_ring++;
}
- DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
- channel->name, channel->queue_index, channel->dma_regs,
- channel->dma_irq, channel->tx_ring, channel->rx_ring);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->dma_regs, channel->dma_irq,
+ channel->tx_ring, channel->rx_ring);
}
pdata->channel = channel_mem;
@@ -235,7 +236,8 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
struct xgbe_prv_data *pdata = channel->pdata;
if (count > xgbe_tx_avail_desc(ring)) {
- DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_info(pdata, drv, pdata->netdev,
+ "Tx queue stopped, not enough descriptors available\n");
netif_stop_subqueue(pdata->netdev, channel->queue_index);
ring->tx.queue_stopped = 1;
@@ -330,7 +332,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (!dma_isr)
goto isr_done;
- DBGPR(" DMA_ISR = %08x\n", dma_isr);
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
for (i = 0; i < pdata->channel_count; i++) {
if (!(dma_isr & (1 << i)))
@@ -339,7 +341,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
channel = pdata->channel + i;
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
- DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+ i, dma_ch_isr);
/* The TI or RI interrupt bits may still be set even if using
* per channel DMA interrupts. Check to be sure those are not
@@ -386,8 +389,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
}
}
- DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
-
isr_done:
return IRQ_HANDLED;
}
@@ -436,43 +437,61 @@ static void xgbe_tx_timer(unsigned long data)
DBGPR("<--xgbe_tx_timer\n");
}
-static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_service(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ service_work);
+
+ pdata->phy_if.phy_status(pdata);
+}
+
+static void xgbe_service_timer(unsigned long data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ schedule_work(&pdata->service_work);
+
+ mod_timer(&pdata->service_timer, jiffies + HZ);
+}
+
+static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
unsigned int i;
- DBGPR("-->xgbe_init_tx_timers\n");
+ setup_timer(&pdata->service_timer, xgbe_service_timer,
+ (unsigned long)pdata);
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
break;
- DBGPR(" %s adding tx timer\n", channel->name);
setup_timer(&channel->tx_timer, xgbe_tx_timer,
(unsigned long)channel);
}
+}
- DBGPR("<--xgbe_init_tx_timers\n");
+static void xgbe_start_timers(struct xgbe_prv_data *pdata)
+{
+ mod_timer(&pdata->service_timer, jiffies + HZ);
}
-static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
unsigned int i;
- DBGPR("-->xgbe_stop_tx_timers\n");
+ del_timer_sync(&pdata->service_timer);
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
break;
- DBGPR(" %s deleting tx timer\n", channel->name);
del_timer_sync(&channel->tx_timer);
}
-
- DBGPR("<--xgbe_stop_tx_timers\n");
}
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
@@ -512,6 +531,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
RXFIFOSIZE);
hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
TXFIFOSIZE);
+ hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
@@ -759,112 +779,12 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_rx_data\n");
}
-static void xgbe_adjust_link(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct phy_device *phydev = pdata->phydev;
- int new_state = 0;
-
- if (!phydev)
- return;
-
- if (phydev->link) {
- /* Flow control support */
- if (pdata->pause_autoneg) {
- if (phydev->pause || phydev->asym_pause) {
- pdata->tx_pause = 1;
- pdata->rx_pause = 1;
- } else {
- pdata->tx_pause = 0;
- pdata->rx_pause = 0;
- }
- }
-
- if (pdata->tx_pause != pdata->phy_tx_pause) {
- hw_if->config_tx_flow_control(pdata);
- pdata->phy_tx_pause = pdata->tx_pause;
- }
-
- if (pdata->rx_pause != pdata->phy_rx_pause) {
- hw_if->config_rx_flow_control(pdata);
- pdata->phy_rx_pause = pdata->rx_pause;
- }
-
- /* Speed support */
- if (phydev->speed != pdata->phy_speed) {
- new_state = 1;
-
- switch (phydev->speed) {
- case SPEED_10000:
- hw_if->set_xgmii_speed(pdata);
- break;
-
- case SPEED_2500:
- hw_if->set_gmii_2500_speed(pdata);
- break;
-
- case SPEED_1000:
- hw_if->set_gmii_speed(pdata);
- break;
- }
- pdata->phy_speed = phydev->speed;
- }
-
- if (phydev->link != pdata->phy_link) {
- new_state = 1;
- pdata->phy_link = 1;
- }
- } else if (pdata->phy_link) {
- new_state = 1;
- pdata->phy_link = 0;
- pdata->phy_speed = SPEED_UNKNOWN;
- }
-
- if (new_state)
- phy_print_status(phydev);
-}
-
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
- struct phy_device *phydev = pdata->phydev;
- int ret;
-
pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
- pdata->phy_tx_pause = pdata->tx_pause;
- pdata->phy_rx_pause = pdata->rx_pause;
- ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
- pdata->phy_mode);
- if (ret) {
- netdev_err(netdev, "phy_connect_direct failed\n");
- return ret;
- }
-
- if (!phydev->drv || (phydev->drv->phy_id == 0)) {
- netdev_err(netdev, "phy_id not valid\n");
- ret = -ENODEV;
- goto err_phy_connect;
- }
- DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
- dev_name(&phydev->dev), phydev->link);
-
- return 0;
-
-err_phy_connect:
- phy_disconnect(phydev);
-
- return ret;
-}
-
-static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
-{
- if (!pdata->phydev)
- return;
-
- phy_disconnect(pdata->phydev);
+ return pdata->phy_if.phy_reset(pdata);
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
@@ -889,13 +809,14 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
netif_tx_stop_all_queues(netdev);
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+
hw_if->powerdown_tx(pdata);
hw_if->powerdown_rx(pdata);
xgbe_napi_disable(pdata, 0);
- phy_stop(pdata->phydev);
-
pdata->power_down = 1;
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -924,8 +845,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
pdata->power_down = 0;
- phy_start(pdata->phydev);
-
xgbe_napi_enable(pdata, 0);
hw_if->powerup_tx(pdata);
@@ -936,6 +855,8 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
netif_tx_start_all_queues(netdev);
+ xgbe_start_timers(pdata);
+
spin_unlock_irqrestore(&pdata->lock, flags);
DBGPR("<--xgbe_powerup\n");
@@ -946,6 +867,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_phy_if *phy_if = &pdata->phy_if;
struct net_device *netdev = pdata->netdev;
int ret;
@@ -953,7 +875,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
hw_if->init(pdata);
- phy_start(pdata->phydev);
+ ret = phy_if->phy_start(pdata);
+ if (ret)
+ goto err_phy;
xgbe_napi_enable(pdata, 1);
@@ -964,10 +888,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
- xgbe_init_tx_timers(pdata);
-
netif_tx_start_all_queues(netdev);
+ xgbe_start_timers(pdata);
+ schedule_work(&pdata->service_work);
+
DBGPR("<--xgbe_start\n");
return 0;
@@ -975,8 +900,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
err_napi:
xgbe_napi_disable(pdata, 1);
- phy_stop(pdata->phydev);
+ phy_if->phy_stop(pdata);
+err_phy:
hw_if->exit(pdata);
return ret;
@@ -985,6 +911,7 @@ err_napi:
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_phy_if *phy_if = &pdata->phy_if;
struct xgbe_channel *channel;
struct net_device *netdev = pdata->netdev;
struct netdev_queue *txq;
@@ -994,7 +921,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
netif_tx_stop_all_queues(netdev);
- xgbe_stop_tx_timers(pdata);
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
@@ -1003,7 +931,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
xgbe_napi_disable(pdata, 1);
- phy_stop(pdata->phydev);
+ phy_if->phy_stop(pdata);
hw_if->exit(pdata);
@@ -1374,7 +1302,7 @@ static int xgbe_open(struct net_device *netdev)
ret = clk_prepare_enable(pdata->sysclk);
if (ret) {
netdev_alert(netdev, "dma clk_prepare_enable failed\n");
- goto err_phy_init;
+ return ret;
}
ret = clk_prepare_enable(pdata->ptpclk);
@@ -1399,14 +1327,17 @@ static int xgbe_open(struct net_device *netdev)
if (ret)
goto err_channels;
- /* Initialize the device restart and Tx timestamp work struct */
+ INIT_WORK(&pdata->service_work, xgbe_service);
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ xgbe_init_timers(pdata);
ret = xgbe_start(pdata);
if (ret)
goto err_rings;
+ clear_bit(XGBE_DOWN, &pdata->dev_state);
+
DBGPR("<--xgbe_open\n");
return 0;
@@ -1423,9 +1354,6 @@ err_ptpclk:
err_sysclk:
clk_disable_unprepare(pdata->sysclk);
-err_phy_init:
- xgbe_phy_exit(pdata);
-
return ret;
}
@@ -1449,8 +1377,7 @@ static int xgbe_close(struct net_device *netdev)
clk_disable_unprepare(pdata->ptpclk);
clk_disable_unprepare(pdata->sysclk);
- /* Release the phy */
- xgbe_phy_exit(pdata);
+ set_bit(XGBE_DOWN, &pdata->dev_state);
DBGPR("<--xgbe_close\n");
@@ -1478,7 +1405,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
if (skb->len == 0) {
- netdev_err(netdev, "empty skb received from stack\n");
+ netif_err(pdata, tx_err, netdev,
+ "empty skb received from stack\n");
dev_kfree_skb_any(skb);
goto tx_netdev_return;
}
@@ -1494,7 +1422,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = xgbe_prep_tso(skb, packet);
if (ret) {
- netdev_err(netdev, "error processing TSO packet\n");
+ netif_err(pdata, tx_err, netdev,
+ "error processing TSO packet\n");
dev_kfree_skb_any(skb);
goto tx_netdev_return;
}
@@ -1513,9 +1442,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Configure required descriptor fields for transmission */
hw_if->dev_xmit(channel);
-#ifdef XGMAC_ENABLE_TX_PKT_DUMP
- xgbe_print_pkt(netdev, skb, true);
-#endif
+ if (netif_msg_pktdata(pdata))
+ xgbe_print_pkt(netdev, skb, true);
/* Stop the queue in advance if there may not be enough descriptors */
xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
@@ -1710,7 +1638,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
(pdata->q2tc_map[queue] == i))
queue++;
- DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1);
+ netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
+ i, offset, queue - 1);
netdev_set_tc_queue(netdev, i, queue - offset, offset);
offset = queue;
}
@@ -1820,9 +1749,10 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
lower_32_bits(rdata->rdesc_dma));
}
-static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+ struct napi_struct *napi,
struct xgbe_ring_data *rdata,
- unsigned int *len)
+ unsigned int len)
{
struct sk_buff *skb;
u8 *packet;
@@ -1832,14 +1762,31 @@ static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
if (!skb)
return NULL;
+ /* Start with the header buffer which may contain just the header
+ * or the header plus data
+ */
+ dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
+ rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
- copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+ copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
copy_len = min(rdata->rx.hdr.dma_len, copy_len);
skb_copy_to_linear_data(skb, packet, copy_len);
skb_put(skb, copy_len);
- *len -= copy_len;
+ len -= copy_len;
+ if (len) {
+ /* Add the remaining data as a frag */
+ dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
+ rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rdata->rx.buf.pa.pages,
+ rdata->rx.buf.pa.pages_offset,
+ len, rdata->rx.buf.dma_len);
+ rdata->rx.buf.pa.pages = NULL;
+ }
return skb;
}
@@ -1877,9 +1824,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
* bit */
dma_rmb();
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
- xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
-#endif
+ if (netif_msg_tx_done(pdata))
+ xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
if (hw_if->is_last_desc(rdesc)) {
tx_packets += rdata->tx.packets;
@@ -1922,7 +1868,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context;
- unsigned int len, put_len, max_len;
+ unsigned int len, rdesc_len, max_len;
unsigned int received = 0;
int packet_count = 0;
@@ -1932,6 +1878,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;
+ incomplete = 0;
+ context_next = 0;
+
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1941,15 +1890,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
/* First time in loop see if we need to restore state */
if (!received && rdata->state_saved) {
- incomplete = rdata->state.incomplete;
- context_next = rdata->state.context_next;
skb = rdata->state.skb;
error = rdata->state.error;
len = rdata->state.len;
} else {
memset(packet, 0, sizeof(*packet));
- incomplete = 0;
- context_next = 0;
skb = NULL;
error = 0;
len = 0;
@@ -1983,29 +1928,23 @@ read_again:
if (error || packet->errors) {
if (packet->errors)
- DBGPR("Error in received packet\n");
+ netif_err(pdata, rx_err, netdev,
+ "error in received packet\n");
dev_kfree_skb(skb);
goto next_packet;
}
if (!context) {
- put_len = rdata->rx.len - len;
- len += put_len;
-
- if (!skb) {
- dma_sync_single_for_cpu(pdata->dev,
- rdata->rx.hdr.dma,
- rdata->rx.hdr.dma_len,
- DMA_FROM_DEVICE);
-
- skb = xgbe_create_skb(napi, rdata, &put_len);
- if (!skb) {
+ /* Length is cumulative, get this descriptor's length */
+ rdesc_len = rdata->rx.len - len;
+ len += rdesc_len;
+
+ if (rdesc_len && !skb) {
+ skb = xgbe_create_skb(pdata, napi, rdata,
+ rdesc_len);
+ if (!skb)
error = 1;
- goto skip_data;
- }
- }
-
- if (put_len) {
+ } else if (rdesc_len) {
dma_sync_single_for_cpu(pdata->dev,
rdata->rx.buf.dma,
rdata->rx.buf.dma_len,
@@ -2014,12 +1953,12 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset,
- put_len, rdata->rx.buf.dma_len);
+ rdesc_len,
+ rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL;
}
}
-skip_data:
if (incomplete || context_next)
goto read_again;
@@ -2033,14 +1972,14 @@ skip_data:
max_len += VLAN_HLEN;
if (skb->len > max_len) {
- DBGPR("packet length exceeds configured MTU\n");
+ netif_err(pdata, rx_err, netdev,
+ "packet length exceeds configured MTU\n");
dev_kfree_skb(skb);
goto next_packet;
}
-#ifdef XGMAC_ENABLE_RX_PKT_DUMP
- xgbe_print_pkt(netdev, skb, false);
-#endif
+ if (netif_msg_pktdata(pdata))
+ xgbe_print_pkt(netdev, skb, false);
skb_checksum_none_assert(skb);
if (XGMAC_GET_BITS(packet->attributes,
@@ -2072,7 +2011,6 @@ skip_data:
skb_record_rx_queue(skb, channel->queue_index);
skb_mark_napi_id(skb, napi);
- netdev->last_rx = jiffies;
napi_gro_receive(napi, skb);
next_packet:
@@ -2083,8 +2021,6 @@ next_packet:
if (received && (incomplete || context_next)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdata->state_saved = 1;
- rdata->state.incomplete = incomplete;
- rdata->state.context_next = context_next;
rdata->state.skb = skb;
rdata->state.len = len;
rdata->state.error = error;
@@ -2165,8 +2101,8 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
return processed;
}
-void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
- unsigned int count, unsigned int flag)
+void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
+ unsigned int idx, unsigned int count, unsigned int flag)
{
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
@@ -2174,20 +2110,29 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
while (count--) {
rdata = XGBE_GET_DESC_DATA(ring, idx);
rdesc = rdata->rdesc;
- pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
- (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
- le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ netdev_dbg(pdata->netdev,
+ "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0),
+ le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2),
+ le32_to_cpu(rdesc->desc3));
idx++;
}
}
-void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
unsigned int idx)
{
- pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
- le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
- le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ rdata = XGBE_GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ netdev_dbg(pdata->netdev,
+ "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+ idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
@@ -2197,21 +2142,21 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
unsigned char buffer[128];
unsigned int i, j;
- netdev_alert(netdev, "\n************** SKB dump ****************\n");
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
- netdev_alert(netdev, "%s packet of %d bytes\n",
- (tx_rx ? "TX" : "RX"), skb->len);
+ netdev_dbg(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
- netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
- netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+ netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
for (i = 0, j = 0; i < skb->len;) {
j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
buf[i++]);
if ((i % 32) == 0) {
- netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
+ netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
j = 0;
} else if ((i % 16) == 0) {
buffer[j++] = ' ';
@@ -2221,7 +2166,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
}
}
if (i % 32)
- netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
+ netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
- netdev_alert(netdev, "\n************** SKB dump ****************\n");
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
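
A pattern worth noting across xgbe-drv.c: every compile-time dump switch (XGMAC_ENABLE_TX_DESC_DUMP and friends) becomes a runtime check against the device's message level, so descriptor and packet dumps no longer require a rebuild. The gating idiom, as used in the hunks above:

	if (netif_msg_tx_queued(pdata))
		xgbe_dump_tx_desc(pdata, ring, start_index,
				  packet->rdesc_count, 1);

and the level itself can then be flipped at runtime, for instance with ethtool -s <dev> msglvl tx_queued on.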
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 5f149e8ee20f..59e090e95c0e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -133,6 +133,12 @@ struct xgbe_stats {
offsetof(struct xgbe_prv_data, mmc_stats._var), \
}
+#define XGMAC_EXT_STAT(_string, _var) \
+ { _string, \
+ FIELD_SIZEOF(struct xgbe_ext_stats, _var), \
+ offsetof(struct xgbe_prv_data, ext_stats._var), \
+ }
+
static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
XGMAC_MMC_STAT("tx_packets", txframecount_gb),
@@ -140,6 +146,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+ XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
@@ -171,6 +178,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+ XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
};
#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
@@ -239,9 +247,9 @@ static void xgbe_get_pauseparam(struct net_device *netdev,
DBGPR("-->xgbe_get_pauseparam\n");
- pause->autoneg = pdata->pause_autoneg;
- pause->tx_pause = pdata->tx_pause;
- pause->rx_pause = pdata->rx_pause;
+ pause->autoneg = pdata->phy.pause_autoneg;
+ pause->tx_pause = pdata->phy.tx_pause;
+ pause->rx_pause = pdata->phy.rx_pause;
DBGPR("<--xgbe_get_pauseparam\n");
}
@@ -250,7 +258,6 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct phy_device *phydev = pdata->phydev;
int ret = 0;
DBGPR("-->xgbe_set_pauseparam\n");
@@ -258,21 +265,26 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
pause->autoneg, pause->tx_pause, pause->rx_pause);
- pdata->pause_autoneg = pause->autoneg;
- if (pause->autoneg) {
- phydev->advertising |= ADVERTISED_Pause;
- phydev->advertising |= ADVERTISED_Asym_Pause;
+ if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
+ return -EINVAL;
+
+ pdata->phy.pause_autoneg = pause->autoneg;
+ pdata->phy.tx_pause = pause->tx_pause;
+ pdata->phy.rx_pause = pause->rx_pause;
- } else {
- phydev->advertising &= ~ADVERTISED_Pause;
- phydev->advertising &= ~ADVERTISED_Asym_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
- pdata->tx_pause = pause->tx_pause;
- pdata->rx_pause = pause->rx_pause;
+ if (pause->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
}
+ if (pause->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
if (netif_running(netdev))
- ret = phy_start_aneg(phydev);
+ ret = pdata->phy_if.phy_config_aneg(pdata);
DBGPR("<--xgbe_set_pauseparam\n");
@@ -283,36 +295,39 @@ static int xgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret;
DBGPR("-->xgbe_get_settings\n");
- if (!pdata->phydev)
- return -ENODEV;
+ cmd->phy_address = pdata->phy.address;
+
+ cmd->supported = pdata->phy.supported;
+ cmd->advertising = pdata->phy.advertising;
+ cmd->lp_advertising = pdata->phy.lp_advertising;
+
+ cmd->autoneg = pdata->phy.autoneg;
+ ethtool_cmd_speed_set(cmd, pdata->phy.speed);
+ cmd->duplex = pdata->phy.duplex;
- ret = phy_ethtool_gset(pdata->phydev, cmd);
+ cmd->port = PORT_NONE;
+ cmd->transceiver = XCVR_INTERNAL;
DBGPR("<--xgbe_get_settings\n");
- return ret;
+ return 0;
}
static int xgbe_set_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct phy_device *phydev = pdata->phydev;
u32 speed;
int ret;
DBGPR("-->xgbe_set_settings\n");
- if (!pdata->phydev)
- return -ENODEV;
-
speed = ethtool_cmd_speed(cmd);
- if (cmd->phy_address != phydev->addr)
+ if (cmd->phy_address != pdata->phy.address)
return -EINVAL;
if ((cmd->autoneg != AUTONEG_ENABLE) &&
@@ -333,23 +348,23 @@ static int xgbe_set_settings(struct net_device *netdev,
return -EINVAL;
}
- cmd->advertising &= phydev->supported;
+ cmd->advertising &= pdata->phy.supported;
if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
return -EINVAL;
ret = 0;
- phydev->autoneg = cmd->autoneg;
- phydev->speed = speed;
- phydev->duplex = cmd->duplex;
- phydev->advertising = cmd->advertising;
+ pdata->phy.autoneg = cmd->autoneg;
+ pdata->phy.speed = speed;
+ pdata->phy.duplex = cmd->duplex;
+ pdata->phy.advertising = cmd->advertising;
if (cmd->autoneg == AUTONEG_ENABLE)
- phydev->advertising |= ADVERTISED_Autoneg;
+ pdata->phy.advertising |= ADVERTISED_Autoneg;
else
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
if (netif_running(netdev))
- ret = phy_start_aneg(phydev);
+ ret = pdata->phy_if.phy_config_aneg(pdata);
DBGPR("<--xgbe_set_settings\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 714905384900..fb7c961da49f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -124,9 +124,11 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
+#include <linux/mdio.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -136,6 +138,49 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
+static int debug = -1;
+module_param(debug, int, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP);
+
+static const u32 xgbe_serdes_blwc[] = {
+ XGBE_SPEED_1000_BLWC,
+ XGBE_SPEED_2500_BLWC,
+ XGBE_SPEED_10000_BLWC,
+};
+
+static const u32 xgbe_serdes_cdr_rate[] = {
+ XGBE_SPEED_1000_CDR,
+ XGBE_SPEED_2500_CDR,
+ XGBE_SPEED_10000_CDR,
+};
+
+static const u32 xgbe_serdes_pq_skew[] = {
+ XGBE_SPEED_1000_PQ,
+ XGBE_SPEED_2500_PQ,
+ XGBE_SPEED_10000_PQ,
+};
+
+static const u32 xgbe_serdes_tx_amp[] = {
+ XGBE_SPEED_1000_TXAMP,
+ XGBE_SPEED_2500_TXAMP,
+ XGBE_SPEED_10000_TXAMP,
+};
+
+static const u32 xgbe_serdes_dfe_tap_cfg[] = {
+ XGBE_SPEED_1000_DFE_TAP_CONFIG,
+ XGBE_SPEED_2500_DFE_TAP_CONFIG,
+ XGBE_SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 xgbe_serdes_dfe_tap_ena[] = {
+ XGBE_SPEED_1000_DFE_TAP_ENABLE,
+ XGBE_SPEED_2500_DFE_TAP_ENABLE,
+ XGBE_SPEED_10000_DFE_TAP_ENABLE,
+};
+
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
@@ -153,8 +198,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
pdata->rx_pause = 1;
pdata->phy_speed = SPEED_UNKNOWN;
pdata->power_down = 0;
- pdata->default_autoneg = AUTONEG_ENABLE;
- pdata->default_speed = SPEED_10000;
DBGPR("<--xgbe_default_config\n");
}
@@ -162,6 +205,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_phy(&pdata->phy_if);
xgbe_init_function_ptrs_desc(&pdata->desc_if);
}
@@ -248,23 +292,82 @@ static int xgbe_of_support(struct xgbe_prv_data *pdata)
return 0;
}
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct device_node *phy_node;
+ struct platform_device *phy_pdev;
+
+ phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (phy_node) {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_pdev = of_find_device_by_node(phy_node);
+ of_node_put(phy_node);
+ } else {
+ /* New style device tree:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
+ get_device(dev);
+ phy_pdev = pdata->pdev;
+ }
+
+ return phy_pdev;
+}
#else /* CONFIG_OF */
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
-#endif /*CONFIG_OF */
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static unsigned int xgbe_resource_count(struct platform_device *pdev,
+ unsigned int type)
+{
+ unsigned int count;
+ int i;
+
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *res = &pdev->resource[i];
+
+ if (type == resource_type(res))
+ count++;
+ }
+
+ return count;
+}
+
+static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct platform_device *phy_pdev;
+
+ if (pdata->use_acpi) {
+ get_device(pdata->dev);
+ phy_pdev = pdata->pdev;
+ } else {
+ phy_pdev = xgbe_of_get_phy_pdev(pdata);
+ }
+
+ return phy_pdev;
+}
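Note the reference counting here: both paths return a held device reference (of_find_device_by_node() takes one internally; the grouped-resource path takes one explicitly via get_device()), which is why xgbe_probe() below can call platform_device_put(phy_pdev) unconditionally on both the success and error paths.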
static int xgbe_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
- struct xgbe_hw_if *hw_if;
- struct xgbe_desc_if *desc_if;
struct net_device *netdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = &pdev->dev, *phy_dev;
+ struct platform_device *phy_pdev;
struct resource *res;
const char *phy_mode;
- unsigned int i;
+ unsigned int i, phy_memnum, phy_irqnum;
int ret;
DBGPR("--> xgbe_probe\n");
@@ -289,9 +392,36 @@ static int xgbe_probe(struct platform_device *pdev)
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
+ pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ set_bit(XGBE_DOWN, &pdata->dev_state);
+
/* Check if we should use ACPI or DT */
pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+ phy_pdev = xgbe_get_phy_pdev(pdata);
+ if (!phy_pdev) {
+ dev_err(dev, "unable to obtain phy device\n");
+ ret = -EINVAL;
+ goto err_phydev;
+ }
+ phy_dev = &phy_pdev->dev;
+
+ if (pdev == phy_pdev) {
+ /* New style device tree or ACPI:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
+ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
+ phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+ } else {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_memnum = 0;
+ phy_irqnum = 0;
+ }
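To make the index arithmetic concrete, a hedged illustration (resource counts assumed, matching the register blocks mapped below) of the grouped layout:

	/* Hypothetical grouped layout (new style DT or ACPI) with five
	 * MEM and two IRQ resources:
	 *
	 *   MEM[0] xgmac_regs    IRQ[0] dev_irq
	 *   MEM[1] xpcs_regs     IRQ[1] an_irq   (phy_irqnum = 2 - 1)
	 *   MEM[2] rxtx_regs     (phy_memnum = 5 - 3)
	 *   MEM[3] sir0_regs
	 *   MEM[4] sir1_regs
	 */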
+
/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
@@ -318,7 +448,8 @@ static int xgbe_probe(struct platform_device *pdev)
ret = PTR_ERR(pdata->xgmac_regs);
goto err_io;
}
- DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
pdata->xpcs_regs = devm_ioremap_resource(dev, res);
@@ -327,7 +458,38 @@ static int xgbe_probe(struct platform_device *pdev)
ret = PTR_ERR(pdata->xpcs_regs);
goto err_io;
}
- DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ ret = PTR_ERR(pdata->rxtx_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->sir0_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->sir0_regs)) {
+ dev_err(dev, "sir0 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir0_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->sir1_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->sir1_regs)) {
+ dev_err(dev, "sir1 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir1_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
/* Retrieve the MAC address */
ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
@@ -355,6 +517,115 @@ static int xgbe_probe(struct platform_device *pdev)
if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
pdata->per_channel_irq = 1;
+ /* Retrieve the PHY speedset */
+ ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
+ &pdata->speed_set);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+ goto err_io;
+ }
+
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ case XGBE_SPEEDSET_2500_10000:
+ break;
+ default:
+ dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Retrieve the PHY configuration properties */
+ if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_BLWC_PROPERTY,
+ pdata->serdes_blwc,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_BLWC_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
+ sizeof(pdata->serdes_blwc));
+ }
+
+ if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_CDR_RATE_PROPERTY,
+ pdata->serdes_cdr_rate,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_CDR_RATE_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
+ sizeof(pdata->serdes_cdr_rate));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PQ_SKEW_PROPERTY,
+ pdata->serdes_pq_skew,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PQ_SKEW_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
+ sizeof(pdata->serdes_pq_skew));
+ }
+
+ if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_TX_AMP_PROPERTY,
+ pdata->serdes_tx_amp,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_TX_AMP_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
+ sizeof(pdata->serdes_tx_amp));
+ }
+
+ if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_DFE_CFG_PROPERTY,
+ pdata->serdes_dfe_tap_cfg,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_DFE_CFG_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
+ sizeof(pdata->serdes_dfe_tap_cfg));
+ }
+
+ if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_DFE_ENA_PROPERTY,
+ pdata->serdes_dfe_tap_ena,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_DFE_ENA_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
+ sizeof(pdata->serdes_dfe_tap_ena));
+ }
+
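The six reads above share one shape (array property if present, baked-in defaults otherwise); a minimal sketch of a helper that could factor the pattern (hypothetical, not part of the patch):

	static int xgbe_read_serdes_prop(struct xgbe_prv_data *pdata,
					 struct device *phy_dev,
					 const char *prop, u32 *dest,
					 const u32 *defaults)
	{
		int ret;

		if (!device_property_present(phy_dev, prop)) {
			memcpy(dest, defaults, XGBE_SPEEDS * sizeof(*dest));
			return 0;
		}

		ret = device_property_read_u32_array(phy_dev, prop, dest,
						     XGBE_SPEEDS);
		if (ret)
			dev_err(pdata->dev, "invalid %s property\n", prop);

		return ret;
	}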
/* Obtain device settings unique to ACPI/OF */
if (pdata->use_acpi)
ret = xgbe_acpi_support(pdata);
@@ -382,17 +653,23 @@ static int xgbe_probe(struct platform_device *pdev)
}
pdata->dev_irq = ret;
+ /* Get the auto-negotiation interrupt */
+ ret = platform_get_irq(phy_pdev, phy_irqnum++);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq phy 0 failed\n");
+ goto err_io;
+ }
+ pdata->an_irq = ret;
+
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
- hw_if = &pdata->hw_if;
- desc_if = &pdata->desc_if;
/* Issue software reset to device */
- hw_if->exit(pdata);
+ pdata->hw_if.exit(pdata);
/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);
@@ -401,8 +678,6 @@ static int xgbe_probe(struct platform_device *pdev)
xgbe_default_config(pdata);
/* Set the DMA mask */
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
ret = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(pdata->hw_feat.dma_width));
if (ret) {
@@ -447,16 +722,8 @@ static int xgbe_probe(struct platform_device *pdev)
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
- /* Prepare to register with MDIO */
- pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
- if (!pdata->mii_bus_id) {
- dev_err(dev, "failed to allocate mii bus id\n");
- ret = -ENOMEM;
- goto err_io;
- }
- ret = xgbe_mdio_register(pdata);
- if (ret)
- goto err_bus_id;
+ /* Call MDIO/PHY initialization routine */
+ pdata->phy_if.phy_init(pdata);
/* Set device operations */
netdev->netdev_ops = xgbe_get_netdev_ops();
@@ -501,26 +768,52 @@ static int xgbe_probe(struct platform_device *pdev)
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "net device registration failed\n");
- goto err_reg_netdev;
+ goto err_io;
+ }
+
+ /* Create the PHY/ANEG name based on netdev name */
+ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
+ netdev_name(netdev));
+
+ /* Create workqueues */
+ pdata->dev_workqueue =
+ create_singlethread_workqueue(netdev_name(netdev));
+ if (!pdata->dev_workqueue) {
+ netdev_err(netdev, "device workqueue creation failed\n");
+ ret = -ENOMEM;
+ goto err_netdev;
+ }
+
+ pdata->an_workqueue =
+ create_singlethread_workqueue(pdata->an_name);
+ if (!pdata->an_workqueue) {
+ netdev_err(netdev, "phy workqueue creation failed\n");
+ ret = -ENOMEM;
+ goto err_wq;
}
xgbe_ptp_register(pdata);
xgbe_debugfs_init(pdata);
+ platform_device_put(phy_pdev);
+
netdev_notice(netdev, "net device enabled\n");
DBGPR("<-- xgbe_probe\n");
return 0;
-err_reg_netdev:
- xgbe_mdio_unregister(pdata);
+err_wq:
+ destroy_workqueue(pdata->dev_workqueue);
-err_bus_id:
- kfree(pdata->mii_bus_id);
+err_netdev:
+ unregister_netdev(netdev);
err_io:
+ platform_device_put(phy_pdev);
+
+err_phydev:
free_netdev(netdev);
err_alloc:
@@ -540,11 +833,13 @@ static int xgbe_remove(struct platform_device *pdev)
xgbe_ptp_unregister(pdata);
- unregister_netdev(netdev);
+ flush_workqueue(pdata->an_workqueue);
+ destroy_workqueue(pdata->an_workqueue);
- xgbe_mdio_unregister(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+ destroy_workqueue(pdata->dev_workqueue);
- kfree(pdata->mii_bus_id);
+ unregister_netdev(netdev);
free_netdev(netdev);
@@ -557,16 +852,17 @@ static int xgbe_remove(struct platform_device *pdev)
static int xgbe_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
- int ret;
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret = 0;
DBGPR("-->xgbe_suspend\n");
- if (!netif_running(netdev)) {
- DBGPR("<--xgbe_dev_suspend\n");
- return -EINVAL;
- }
+ if (netif_running(netdev))
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
- ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+ pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
DBGPR("<--xgbe_suspend\n");
@@ -576,16 +872,16 @@ static int xgbe_suspend(struct device *dev)
static int xgbe_resume(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
- int ret;
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret = 0;
DBGPR("-->xgbe_resume\n");
- if (!netif_running(netdev)) {
- DBGPR("<--xgbe_dev_resume\n");
- return -EINVAL;
- }
+ pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
- ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+ if (netif_running(netdev))
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
DBGPR("<--xgbe_resume\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 59e267f3f1b7..9088c3a35a20 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -119,194 +119,1246 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
#include "xgbe.h"
#include "xgbe-common.h"
-static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata = mii->priv;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- int mmd_data;
+ unsigned int reg;
- DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
- prtad, mmd_reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
+ reg |= XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void xgbe_an_disable_kr_training(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
- DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- return mmd_data;
+ reg &= ~XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
}
-static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
- u16 mmd_val)
+static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata = mii->priv;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- int mmd_data = mmd_val;
+ unsigned int reg;
- DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
- prtad, mmd_reg, mmd_data);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
+ reg |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
- DBGPR_MDIO("<--xgbe_mdio_write\n");
+ usleep_range(75, 100);
- return 0;
+ reg &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
}
-void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata)
{
- struct device *dev = pdata->dev;
- struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
- int i;
-
- dev_alert(dev, "\n************* PHY Reg dump **********************\n");
-
- dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
- dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
- dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
- dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
- dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
- dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
-
- dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
- dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
- dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
- dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
- dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 2,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
- dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
- MDIO_AN_COMP_STAT,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
-
- dev_alert(dev, "MMD Device Mask = %#x\n",
- phydev->c45_ids.devices_in_package);
- for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
- dev_alert(dev, " MMD %d: ID = %#08x\n", i,
- phydev->c45_ids.device_ids[i]);
-
- dev_alert(dev, "\n*************************************************\n");
-}
-
-int xgbe_mdio_register(struct xgbe_prv_data *pdata)
-{
- struct mii_bus *mii;
- struct phy_device *phydev;
- int ret = 0;
-
- DBGPR("-->xgbe_mdio_register\n");
-
- mii = mdiobus_alloc();
- if (!mii) {
- dev_err(pdata->dev, "mdiobus_alloc failed\n");
- return -ENOMEM;
+ /* Assert Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+}
+
+static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait;
+ u16 status;
+
+ /* Release Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+
+ /* Wait for Rx and Tx ready */
+ wait = XGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ usleep_range(50, 75);
+
+ status = XSIR0_IOREAD(pdata, SIR0_STATUS);
+ if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+ XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+ goto rx_reset;
}
- /* Register on the MDIO bus (don't probe any PHYs) */
- mii->name = XGBE_PHY_NAME;
- mii->read = xgbe_mdio_read;
- mii->write = xgbe_mdio_write;
- snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
- mii->priv = pdata;
- mii->phy_mask = ~0;
- mii->parent = pdata->dev;
- ret = mdiobus_register(mii);
- if (ret) {
- dev_err(pdata->dev, "mdiobus_register failed\n");
- goto err_mdiobus_alloc;
+ netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
+ status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
+}
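A quick timing check on the ready poll above: XGBE_RATECHANGE_COUNT is 500 and each pass sleeps 50-75 us, so the loop waits roughly 25-37.5 ms (plus scheduling slack) before logging that the SerDes never became ready.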
+
+static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Enable KR training */
+ xgbe_an_enable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_xgmii_speed(pdata);
+
+ /* Set PCS to KR/10G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBR;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED10G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 10G speed */
+ xgbe_serdes_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ pdata->serdes_cdr_rate[XGBE_SPEED_10000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ pdata->serdes_tx_amp[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ pdata->serdes_blwc[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ pdata->serdes_pq_skew[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
+
+ xgbe_serdes_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+}
+
+static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Disable KR training */
+ xgbe_an_disable_kr_training(pdata);
+
+ /* Set MAC to 2.5G speed */
+ pdata->hw_if.set_gmii_2500_speed(pdata);
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 2.5G speed */
+ xgbe_serdes_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ pdata->serdes_cdr_rate[XGBE_SPEED_2500]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ pdata->serdes_tx_amp[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ pdata->serdes_blwc[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ pdata->serdes_pq_skew[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
+
+ xgbe_serdes_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+}
+
+static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Disable KR training */
+ xgbe_an_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_gmii_speed(pdata);
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 1G speed */
+ xgbe_serdes_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ pdata->serdes_cdr_rate[XGBE_SPEED_1000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ pdata->serdes_tx_amp[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ pdata->serdes_blwc[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ pdata->serdes_pq_skew[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
+
+ xgbe_serdes_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+}
+
+static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode *mode)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
+ *mode = XGBE_MODE_KR;
+ else
+ *mode = XGBE_MODE_KX;
+}
+
+static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mode mode;
+
+ xgbe_cur_mode(pdata, &mode);
+
+ return (mode == XGBE_MODE_KR);
+}
+
+static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
+{
+ /* If we are in KR, switch to KX, and vice versa */
+ if (xgbe_in_kr_mode(pdata)) {
+ if (pdata->speed_set == XGBE_SPEEDSET_1000_10000)
+ xgbe_gmii_mode(pdata);
+ else
+ xgbe_gmii_2500_mode(pdata);
+ } else {
+ xgbe_xgmii_mode(pdata);
}
- DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
-
- /* Probe the PCS using Clause 45 */
- phydev = get_phy_device(mii, XGBE_PRTAD, true);
- if (IS_ERR(phydev) || !phydev ||
- !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
- dev_err(pdata->dev, "get_phy_device failed\n");
- ret = phydev ? PTR_ERR(phydev) : -ENOLINK;
- goto err_mdiobus_register;
+}
+
+static void xgbe_set_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ enum xgbe_mode cur_mode;
+
+ xgbe_cur_mode(pdata, &cur_mode);
+ if (mode != cur_mode)
+ xgbe_switch_mode(pdata);
+}
+
+static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ return true;
+ } else {
+ if (pdata->phy.speed == SPEED_10000)
+ return true;
}
- request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
- MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
- ret = phy_device_register(phydev);
- if (ret) {
- dev_err(pdata->dev, "phy_device_register failed\n");
- goto err_phy_device;
+ return false;
+}
+
+static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+ return true;
+ } else {
+ if (pdata->phy.speed == SPEED_2500)
+ return true;
}
- if (!phydev->dev.driver) {
- dev_err(pdata->dev, "phy driver probe failed\n");
- ret = -EIO;
- goto err_phy_device;
+
+ return false;
+}
+
+static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+ return true;
+ } else {
+ if (pdata->phy.speed == SPEED_1000)
+ return true;
}
- /* Add a reference to the PHY driver so it can't be unloaded */
- pdata->phy_module = phydev->dev.driver->owner;
- if (!try_module_get(pdata->phy_module)) {
- dev_err(pdata->dev, "try_module_get failed\n");
- ret = -EIO;
- goto err_phy_device;
+ return false;
+}
+
+static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ reg |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ reg |= MDIO_AN_CTRL1_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+static void xgbe_restart_an(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_an(pdata, true, true);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
+}
+
+static void xgbe_disable_an(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_an(pdata, false, false);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
+}
+
+static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg, reg;
+
+ *state = XGBE_RX_COMPLETE;
+
+ /* If we're not in KR mode then we're done */
+ if (!xgbe_in_kr_mode(pdata))
+ return XGBE_AN_PAGE_RECEIVED;
+
+ /* Enable/Disable FEC */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+ reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ reg |= pdata->fec_ability;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+ /* Start KR training */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (reg & XGBE_KR_TRAINING_ENABLE) {
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+
+ reg |= XGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ reg);
+
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "KR training initiated\n");
}
- pdata->mii = mii;
- pdata->mdio_mmd = MDIO_MMD_PCS;
+ return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ u16 msg;
+
+ *state = XGBE_RX_XNP;
+
+ msg = XGBE_XNP_MCF_NULL_MESSAGE;
+ msg |= XGBE_XNP_MP_FORMATTED;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int link_support;
+ unsigned int reg, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+ /* Check for a supported mode, otherwise restart in a different one */
+ link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+ if (!(reg & link_support))
+ return XGBE_AN_INCOMPAT_LINK;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+ ? xgbe_an_tx_xnp(pdata, state)
+ : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+ ? xgbe_an_tx_xnp(pdata, state)
+ : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_rx *state;
+ unsigned long an_timeout;
+ enum xgbe_an ret;
+
+ if (!pdata->an_start) {
+ pdata->an_start = jiffies;
+ } else {
+ an_timeout = pdata->an_start +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, an_timeout)) {
+ /* Auto-negotiation timed out, reset state */
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+
+ pdata->an_start = jiffies;
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "AN timed out, resetting state\n");
+ }
+ }
- phydev->autoneg = pdata->default_autoneg;
- if (phydev->autoneg == AUTONEG_DISABLE) {
- phydev->speed = pdata->default_speed;
- phydev->duplex = DUPLEX_FULL;
+ state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state
+ : &pdata->kx_state;
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ switch (*state) {
+ case XGBE_RX_BPA:
+ ret = xgbe_an_rx_bpa(pdata, state);
+ break;
+
+ case XGBE_RX_XNP:
+ ret = xgbe_an_rx_xnp(pdata, state);
+ break;
+
+ default:
+ ret = XGBE_AN_ERROR;
+ }
+
+ return ret;
+}
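The timeout test at the top of this function uses the standard jiffies idiom; isolated as a sketch (hypothetical helper):

	static bool xgbe_an_window_expired(unsigned long an_start)
	{
		/* time_after() handles jiffies wraparound correctly */
		return time_after(jiffies,
				  an_start +
				  msecs_to_jiffies(XGBE_AN_MS_TIMEOUT));
	}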
+
+static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
+{
+ /* Be sure we aren't looping trying to negotiate */
+ if (xgbe_in_kr_mode(pdata)) {
+ pdata->kr_state = XGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+ !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kx_state != XGBE_RX_BPA)
+ return XGBE_AN_NO_LINK;
+ } else {
+ pdata->kx_state = XGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kr_state != XGBE_RX_BPA)
+ return XGBE_AN_NO_LINK;
+ }
+
+ xgbe_disable_an(pdata);
+
+ xgbe_switch_mode(pdata);
+
+ xgbe_restart_an(pdata);
+
+ return XGBE_AN_INCOMPAT_LINK;
+}
+
+static irqreturn_t xgbe_an_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
+
+ /* Interrupt reason must be read and cleared outside of IRQ context */
+ disable_irq_nosync(pdata->an_irq);
+
+ queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void xgbe_an_irq_work(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ an_irq_work);
+
+ /* Avoid a race between enabling the IRQ and exiting the work by
+ * waiting for the work to finish and then queueing it
+ */
+ flush_work(&pdata->an_work);
+ queue_work(pdata->an_workqueue, &pdata->an_work);
+}
+
+static const char *xgbe_state_as_string(enum xgbe_an state)
+{
+ switch (state) {
+ case XGBE_AN_READY:
+ return "Ready";
+ case XGBE_AN_PAGE_RECEIVED:
+ return "Page-Received";
+ case XGBE_AN_INCOMPAT_LINK:
+ return "Incompatible-Link";
+ case XGBE_AN_COMPLETE:
+ return "Complete";
+ case XGBE_AN_NO_LINK:
+ return "No-Link";
+ case XGBE_AN_ERROR:
+ return "Error";
+ default:
+ return "Undefined";
+ }
+}
+
+static void xgbe_an_state_machine(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ an_work);
+ enum xgbe_an cur_state = pdata->an_state;
+ unsigned int int_reg, int_mask;
+
+ mutex_lock(&pdata->an_mutex);
+
+ /* Read the interrupt */
+ int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+ if (!int_reg)
+ goto out;
+
+next_int:
+ if (int_reg & XGBE_AN_PG_RCV) {
+ pdata->an_state = XGBE_AN_PAGE_RECEIVED;
+ int_mask = XGBE_AN_PG_RCV;
+ } else if (int_reg & XGBE_AN_INC_LINK) {
+ pdata->an_state = XGBE_AN_INCOMPAT_LINK;
+ int_mask = XGBE_AN_INC_LINK;
+ } else if (int_reg & XGBE_AN_INT_CMPLT) {
+ pdata->an_state = XGBE_AN_COMPLETE;
+ int_mask = XGBE_AN_INT_CMPLT;
+ } else {
+ pdata->an_state = XGBE_AN_ERROR;
+ int_mask = 0;
}
- pdata->phydev = phydev;
+ /* Clear the interrupt to be processed */
+ int_reg &= ~int_mask;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
+
+ pdata->an_result = pdata->an_state;
+
+again:
+ netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
+ xgbe_state_as_string(pdata->an_state));
+
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case XGBE_AN_READY:
+ pdata->an_supported = 0;
+ break;
+
+ case XGBE_AN_PAGE_RECEIVED:
+ pdata->an_state = xgbe_an_page_received(pdata);
+ pdata->an_supported++;
+ break;
+
+ case XGBE_AN_INCOMPAT_LINK:
+ pdata->an_supported = 0;
+ pdata->parallel_detect = 0;
+ pdata->an_state = xgbe_an_incompat_link(pdata);
+ break;
- DBGPHY_REGS(pdata);
+ case XGBE_AN_COMPLETE:
+ pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+ netif_dbg(pdata, link, pdata->netdev, "%s successful\n",
+ pdata->an_supported ? "Auto negotiation"
+ : "Parallel detection");
+ break;
- DBGPR("<--xgbe_mdio_register\n");
+ case XGBE_AN_NO_LINK:
+ break;
+
+ default:
+ pdata->an_state = XGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == XGBE_AN_NO_LINK) {
+ int_reg = 0;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ } else if (pdata->an_state == XGBE_AN_ERROR) {
+ netdev_err(pdata->netdev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+
+ int_reg = 0;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ }
+
+ if (pdata->an_state >= XGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = XGBE_AN_READY;
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+ pdata->an_start = 0;
+
+ netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
+ xgbe_state_as_string(pdata->an_result));
+ }
+
+ if (cur_state != pdata->an_state)
+ goto again;
+
+ if (int_reg)
+ goto next_int;
+
+out:
+ enable_irq(pdata->an_irq);
+
+ mutex_unlock(&pdata->an_mutex);
+}
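Summarizing the transitions the loop above drives (as read from the code):

	/* Page-Received        -> Page-Received (training/XNP), or
	 *                        Incompatible-Link, or Error
	 * Incompatible-Link    -> switch KR<->KX and restart AN, or
	 *                        No-Link if both modes already failed
	 * Complete/No-Link/Error -> Ready (an_result latched, kr/kx
	 *                        state reset to BPA)
	 */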
+
+static void xgbe_an_init(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Set up Advertisement register 3 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+ /* Set up Advertisement register 2 next */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+ (pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+ /* Set up Advertisement register 1 last */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (pdata->phy.advertising & ADVERTISED_Pause)
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+ if (pdata->phy.advertising & ADVERTISED_Asym_Pause)
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ reg &= ~XGBE_XNP_NP_EXCHANGE;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
+}
+
+static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
+{
+ if (pdata->tx_pause && pdata->rx_pause)
+ return "rx/tx";
+ else if (pdata->rx_pause)
+ return "rx";
+ else if (pdata->tx_pause)
+ return "tx";
+ else
+ return "off";
+}
+
+static const char *xgbe_phy_speed_string(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+ return "2.5Gbps";
+ case SPEED_10000:
+ return "10Gbps";
+ case SPEED_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Unsupported";
+ }
+}
+
+static void xgbe_phy_print_status(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.link)
+ netdev_info(pdata->netdev,
+ "Link is Up - %s/%s - flow control %s\n",
+ xgbe_phy_speed_string(pdata->phy.speed),
+ pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
+ xgbe_phy_fc_string(pdata));
+ else
+ netdev_info(pdata->netdev, "Link is Down\n");
+}
+
+static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
+{
+ int new_state = 0;
+
+ if (pdata->phy.link) {
+ /* Flow control support */
+ pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+ if (pdata->tx_pause != pdata->phy.tx_pause) {
+ new_state = 1;
+ pdata->hw_if.config_tx_flow_control(pdata);
+ pdata->tx_pause = pdata->phy.tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy.rx_pause) {
+ new_state = 1;
+ pdata->hw_if.config_rx_flow_control(pdata);
+ pdata->rx_pause = pdata->phy.rx_pause;
+ }
+
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed) {
+ new_state = 1;
+ pdata->phy_speed = pdata->phy.speed;
+ }
+
+ if (pdata->phy_link != pdata->phy.link) {
+ new_state = 1;
+ pdata->phy_link = pdata->phy.link;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state && netif_msg_link(pdata))
+ xgbe_phy_print_status(pdata);
+}
+
+static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+{
+ netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
+
+ /* Disable auto-negotiation */
+ xgbe_disable_an(pdata);
+
+ /* Validate/Set specified speed */
+ switch (pdata->phy.speed) {
+ case SPEED_10000:
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ break;
+
+ case SPEED_2500:
+ case SPEED_1000:
+ xgbe_set_mode(pdata, XGBE_MODE_KX);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate duplex mode */
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ set_bit(XGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = jiffies;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE)
+ return xgbe_phy_config_fixed(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
+
+ /* Disable auto-negotiation interrupt */
+ disable_irq(pdata->an_irq);
+
+ /* Start auto-negotiation in a supported mode */
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ } else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+ (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX);
+ } else {
+ enable_irq(pdata->an_irq);
+ return -EINVAL;
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+ xgbe_disable_an(pdata);
+
+ /* Clear any auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ pdata->an_result = XGBE_AN_READY;
+ pdata->an_state = XGBE_AN_READY;
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+
+ /* Re-enable auto-negotiation interrupt */
+ enable_irq(pdata->an_irq);
+
+ /* Set up advertisement registers based on current settings */
+ xgbe_an_init(pdata);
+
+ /* Enable and start auto-negotiation */
+ xgbe_restart_an(pdata);
return 0;
+}
+
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ mutex_lock(&pdata->an_mutex);
+
+ ret = __xgbe_phy_config_aneg(pdata);
+ if (ret)
+ set_bit(XGBE_LINK_ERR, &pdata->dev_state);
+ else
+ clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
+
+ mutex_unlock(&pdata->an_mutex);
+
+ return ret;
+}
+
+static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+{
+ return (pdata->an_result == XGBE_AN_COMPLETE);
+}
+
+static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+{
+ unsigned long link_timeout;
+
+ link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+ if (time_after(jiffies, link_timeout)) {
+ netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+ xgbe_phy_config_aneg(pdata);
+ }
+}
+
+static void xgbe_phy_status_force(struct xgbe_prv_data *pdata)
+{
+ if (xgbe_in_kr_mode(pdata)) {
+ pdata->phy.speed = SPEED_10000;
+ } else {
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ }
+ }
+ pdata->phy.duplex = DUPLEX_FULL;
+}
+
+static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+{
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising = 0;
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+ return xgbe_phy_status_force(pdata);
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x800)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20) {
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
+ break;
+ }
+ }
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ pdata->phy.speed = SPEED_10000;
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ } else if (ad_reg & 0x20) {
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ }
+
+ xgbe_set_mode(pdata, XGBE_MODE_KX);
+ } else {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ pdata->phy.duplex = DUPLEX_FULL;
+}
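The pause resolution above matches IEEE 802.3 Annex 28B; spelled out as a table (P = Pause, bit 0x400; A = Asym_Pause, bit 0x800, in ad_reg/lp_reg):

	/* local  partner  result
	 *  P      P        tx_pause = rx_pause = 1
	 *  P+A    A        rx_pause = 1
	 *  A      P+A      tx_pause = 1
	 *  other  other    no pause
	 */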
+
+static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg, link_aneg;
+
+ if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+ if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+ netif_carrier_off(pdata->netdev);
+
+ pdata->phy.link = 0;
+ goto adjust_link;
+ }
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+ /* Get the link status. Link status is latched low, so read
+ * once to clear and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+ if (pdata->phy.link) {
+ if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
+ xgbe_check_link_timeout(pdata);
+ return;
+ }
+
+ xgbe_phy_status_aneg(pdata);
+
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+ clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+
+ if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
+ set_bit(XGBE_LINK, &pdata->dev_state);
+ netif_carrier_on(pdata->netdev);
+ }
+ } else {
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ xgbe_check_link_timeout(pdata);
+
+ if (link_aneg)
+ return;
+ }
+
+ xgbe_phy_status_aneg(pdata);
+
+ if (test_bit(XGBE_LINK, &pdata->dev_state)) {
+ clear_bit(XGBE_LINK, &pdata->dev_state);
+ netif_carrier_off(pdata->netdev);
+ }
+ }
+
+adjust_link:
+ xgbe_phy_adjust_link(pdata);
+}
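The back-to-back MDIO_STAT1 reads above implement the latched-low rule; isolated as a sketch (hypothetical helper):

	static int xgbe_pcs_link_up(struct xgbe_prv_data *pdata)
	{
		unsigned int reg;

		/* first read returns/clears any latched link-down event */
		(void)XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
		/* second read reports the current link state */
		reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);

		return !!(reg & MDIO_STAT1_LSTATUS);
	}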
+
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+
+ /* Disable auto-negotiation */
+ xgbe_disable_an(pdata);
+
+ /* Disable auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
-err_phy_device:
- phy_device_free(phydev);
+ pdata->phy.link = 0;
+ if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+ netif_carrier_off(pdata->netdev);
-err_mdiobus_register:
- mdiobus_unregister(mii);
+ xgbe_phy_adjust_link(pdata);
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
+
+ ret = devm_request_irq(pdata->dev, pdata->an_irq,
+ xgbe_an_isr, 0, pdata->an_name,
+ pdata);
+ if (ret) {
+ netdev_err(netdev, "phy irq request failed\n");
+ return ret;
+ }
+
+ /* Set initial mode - call the mode setting routines
+ * directly to ensure we are properly configured
+ */
+ if (xgbe_use_xgmii_mode(pdata)) {
+ xgbe_xgmii_mode(pdata);
+ } else if (xgbe_use_gmii_mode(pdata)) {
+ xgbe_gmii_mode(pdata);
+ } else if (xgbe_use_gmii_2500_mode(pdata)) {
+ xgbe_gmii_2500_mode(pdata);
+ } else {
+ ret = -EINVAL;
+ goto err_irq;
+ }
+
+ /* Set up advertisement registers based on current settings */
+ xgbe_an_init(pdata);
+
+ /* Enable auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
-err_mdiobus_alloc:
- mdiobus_free(mii);
+ return xgbe_phy_config_aneg(pdata);
+
+err_irq:
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
return ret;
}
-void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ unsigned int count, reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg |= MDIO_CTRL1_RESET;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ count = 50;
+ do {
+ msleep(20);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ } while ((reg & MDIO_CTRL1_RESET) && --count);
+
+ if (reg & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+
+ /* Disable auto-negotiation for now */
+ xgbe_disable_an(pdata);
+
+ /* Clear auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ return 0;
+}
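Worth noting the reset budget above: 50 polls with msleep(20) between them gives the PCS roughly one second to clear MDIO_CTRL1_RESET before -ETIMEDOUT is returned.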
+
+static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
{
- DBGPR("-->xgbe_mdio_unregister\n");
+ struct device *dev = pdata->dev;
+
+ dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
+
+ dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+ dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+ dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+ dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+ dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+ dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_dbg(dev, "\n*************************************************\n");
+}
+
+static void xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ mutex_init(&pdata->an_mutex);
+ INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
+ INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ /* Initialize supported features */
+ pdata->phy.supported = SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ break;
+ }
- pdata->phydev = NULL;
+ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+ MDIO_PMA_10GBR_FECABLE_ERRABLE);
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
- module_put(pdata->phy_module);
- pdata->phy_module = NULL;
+ pdata->phy.advertising = pdata->phy.supported;
- mdiobus_unregister(pdata->mii);
- pdata->mii->priv = NULL;
+ pdata->phy.address = 0;
+
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+
+ pdata->phy.link = 0;
+
+ pdata->phy.pause_autoneg = pdata->pause_autoneg;
+ pdata->phy.tx_pause = pdata->tx_pause;
+ pdata->phy.rx_pause = pdata->rx_pause;
+
+ /* Fix up Flow Control advertising */
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+ if (pdata->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+ }
+
+ if (pdata->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
+ if (netif_msg_drv(pdata))
+ xgbe_dump_phy_registers(pdata);
+}
+
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+{
+ phy_if->phy_init = xgbe_phy_init;
- mdiobus_free(pdata->mii);
- pdata->mii = NULL;
+ phy_if->phy_reset = xgbe_phy_reset;
+ phy_if->phy_start = xgbe_phy_start;
+ phy_if->phy_stop = xgbe_phy_stop;
- DBGPR("<--xgbe_mdio_unregister\n");
+ phy_if->phy_status = xgbe_phy_status;
+ phy_if->phy_config_aneg = xgbe_phy_config_aneg;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index e62dfa2deab6..63d72a140053 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -129,7 +129,7 @@
#include <net/dcbnl.h>
#define XGBE_DRV_NAME "amd-xgbe"
-#define XGBE_DRV_VERSION "1.0.0-a"
+#define XGBE_DRV_VERSION "1.0.2"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
@@ -178,14 +178,17 @@
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
-/* MDIO bus phy name */
-#define XGBE_PHY_NAME "amd_xgbe_phy"
-#define XGBE_PRTAD 0
-
/* Common property names */
#define XGBE_MAC_ADDR_PROPERTY "mac-address"
#define XGBE_PHY_MODE_PROPERTY "phy-mode"
#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+#define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
+#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
+#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
+#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
+#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
@@ -241,6 +244,49 @@
#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
#define XGBE_RSS_HASH_KEY_TYPE 1
+/* Auto-negotiation */
+#define XGBE_AN_MS_TIMEOUT 500
+#define XGBE_LINK_TIMEOUT 10
+
+#define XGBE_AN_INT_CMPLT 0x01
+#define XGBE_AN_INC_LINK 0x02
+#define XGBE_AN_PG_RCV 0x04
+#define XGBE_AN_INT_MASK 0x07
+
+/* Rate-change complete wait/retry count */
+#define XGBE_RATECHANGE_COUNT 500
+
+/* Default SerDes settings */
+#define XGBE_SPEED_10000_BLWC 0
+#define XGBE_SPEED_10000_CDR 0x7
+#define XGBE_SPEED_10000_PLL 0x1
+#define XGBE_SPEED_10000_PQ 0x12
+#define XGBE_SPEED_10000_RATE 0x0
+#define XGBE_SPEED_10000_TXAMP 0xa
+#define XGBE_SPEED_10000_WORD 0x7
+#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
+#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
+
+#define XGBE_SPEED_2500_BLWC 1
+#define XGBE_SPEED_2500_CDR 0x2
+#define XGBE_SPEED_2500_PLL 0x0
+#define XGBE_SPEED_2500_PQ 0xa
+#define XGBE_SPEED_2500_RATE 0x1
+#define XGBE_SPEED_2500_TXAMP 0xf
+#define XGBE_SPEED_2500_WORD 0x1
+#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
+
+#define XGBE_SPEED_1000_BLWC 1
+#define XGBE_SPEED_1000_CDR 0x2
+#define XGBE_SPEED_1000_PLL 0x0
+#define XGBE_SPEED_1000_PQ 0xa
+#define XGBE_SPEED_1000_RATE 0x3
+#define XGBE_SPEED_1000_TXAMP 0xf
+#define XGBE_SPEED_1000_WORD 0x1
+#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -334,8 +380,6 @@ struct xgbe_ring_data {
*/
unsigned int state_saved;
struct {
- unsigned int incomplete;
- unsigned int context_next;
struct sk_buff *skb;
unsigned int len;
unsigned int error;
@@ -414,6 +458,13 @@ struct xgbe_channel {
struct xgbe_ring *rx_ring;
} ____cacheline_aligned;
+enum xgbe_state {
+ XGBE_DOWN,
+ XGBE_LINK,
+ XGBE_LINK_INIT,
+ XGBE_LINK_ERR,
+};
+
enum xgbe_int {
XGMAC_INT_DMA_CH_SR_TI,
XGMAC_INT_DMA_CH_SR_TPS,
@@ -445,6 +496,57 @@ enum xgbe_mtl_fifo_size {
XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
};
+enum xgbe_speed {
+ XGBE_SPEED_1000 = 0,
+ XGBE_SPEED_2500,
+ XGBE_SPEED_10000,
+ XGBE_SPEEDS,
+};
+
+enum xgbe_an {
+ XGBE_AN_READY = 0,
+ XGBE_AN_PAGE_RECEIVED,
+ XGBE_AN_INCOMPAT_LINK,
+ XGBE_AN_COMPLETE,
+ XGBE_AN_NO_LINK,
+ XGBE_AN_ERROR,
+};
+
+enum xgbe_rx {
+ XGBE_RX_BPA = 0,
+ XGBE_RX_XNP,
+ XGBE_RX_COMPLETE,
+ XGBE_RX_ERROR,
+};
+
+enum xgbe_mode {
+ XGBE_MODE_KR = 0,
+ XGBE_MODE_KX,
+};
+
+enum xgbe_speedset {
+ XGBE_SPEEDSET_1000_10000 = 0,
+ XGBE_SPEEDSET_2500_10000,
+};
+
+struct xgbe_phy {
+ u32 supported;
+ u32 advertising;
+ u32 lp_advertising;
+
+ int address;
+
+ int autoneg;
+ int speed;
+ int duplex;
+
+ int link;
+
+ int pause_autoneg;
+ int tx_pause;
+ int rx_pause;
+};
+
struct xgbe_mmc_stats {
/* Tx Stats */
u64 txoctetcount_gb;
@@ -492,6 +594,11 @@ struct xgbe_mmc_stats {
u64 rxwatchdogerror;
};
+struct xgbe_ext_stats {
+ u64 tx_tso_packets;
+ u64 rx_split_header_packets;
+};
+
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
@@ -591,6 +698,20 @@ struct xgbe_hw_if {
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
+struct xgbe_phy_if {
+ /* For initial PHY setup */
+ void (*phy_init)(struct xgbe_prv_data *);
+
+ /* For PHY support when setting device up/down */
+ int (*phy_reset)(struct xgbe_prv_data *);
+ int (*phy_start)(struct xgbe_prv_data *);
+ void (*phy_stop)(struct xgbe_prv_data *);
+
+ /* For PHY support while device is up */
+ void (*phy_status)(struct xgbe_prv_data *);
+ int (*phy_config_aneg)(struct xgbe_prv_data *);
+};
+
struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
@@ -660,6 +781,9 @@ struct xgbe_prv_data {
/* XGMAC/XPCS related mmio registers */
void __iomem *xgmac_regs; /* XGMAC CSRs */
void __iomem *xpcs_regs; /* XPCS MMD registers */
+ void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
+ void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
+ void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
/* Overall device lock */
spinlock_t lock;
@@ -670,10 +794,14 @@ struct xgbe_prv_data {
/* RSS addressing mutex */
struct mutex rss_mutex;
+ /* Flags representing xgbe_state */
+ unsigned long dev_state;
+
int dev_irq;
unsigned int per_channel_irq;
struct xgbe_hw_if hw_if;
+ struct xgbe_phy_if phy_if;
struct xgbe_desc_if desc_if;
/* AXI DMA settings */
@@ -682,6 +810,11 @@ struct xgbe_prv_data {
unsigned int arcache;
unsigned int awcache;
+ /* Service routine support */
+ struct workqueue_struct *dev_workqueue;
+ struct work_struct service_work;
+ struct timer_list service_timer;
+
/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
unsigned int channel_count;
@@ -729,27 +862,12 @@ struct xgbe_prv_data {
u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
u32 rss_options;
- /* MDIO settings */
- struct module *phy_module;
- char *mii_bus_id;
- struct mii_bus *mii;
- int mdio_mmd;
- struct phy_device *phydev;
- int default_autoneg;
- int default_speed;
-
- /* Current PHY settings */
- phy_interface_t phy_mode;
- int phy_link;
- int phy_speed;
- unsigned int phy_tx_pause;
- unsigned int phy_rx_pause;
-
/* Netdev related settings */
unsigned char mac_addr[ETH_ALEN];
netdev_features_t netdev_features;
struct napi_struct napi;
struct xgbe_mmc_stats mmc_stats;
+ struct xgbe_ext_stats ext_stats;
/* Filtering support */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -787,6 +905,54 @@ struct xgbe_prv_data {
/* Keeps track of power mode */
unsigned int power_down;
+ /* Network interface message level setting */
+ u32 msg_enable;
+
+ /* Current PHY settings */
+ phy_interface_t phy_mode;
+ int phy_link;
+ int phy_speed;
+
+ /* MDIO/PHY related settings */
+ struct xgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
+
+ char an_name[IFNAMSIZ + 32];
+ struct workqueue_struct *an_workqueue;
+
+ int an_irq;
+ struct work_struct an_irq_work;
+
+ unsigned int speed_set;
+
+ /* SerDes UEFI-configurable settings.
+ * Switching between modes/speeds requires new values for some
+ * SerDes settings. The values can be supplied as device
+ * properties in array format. The first array entry is for
+ * 1GbE, the second for 2.5GbE and the third for 10GbE.
+ */
+ u32 serdes_blwc[XGBE_SPEEDS];
+ u32 serdes_cdr_rate[XGBE_SPEEDS];
+ u32 serdes_pq_skew[XGBE_SPEEDS];
+ u32 serdes_tx_amp[XGBE_SPEEDS];
+ u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
+ u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
+
+ /* Auto-negotiation state machine support */
+ struct mutex an_mutex;
+ enum xgbe_an an_result;
+ enum xgbe_an an_state;
+ enum xgbe_rx kr_state;
+ enum xgbe_rx kx_state;
+ struct work_struct an_work;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
+
+ unsigned int lpm_ctrl; /* CTRL1 for resume */
+
#ifdef CONFIG_DEBUG_FS
struct dentry *xgbe_debugfs;
@@ -800,6 +966,7 @@ struct xgbe_prv_data {
/* Function prototypes*/
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
struct net_device_ops *xgbe_get_netdev_ops(void);
struct ethtool_ops *xgbe_get_ethtool_ops(void);
@@ -807,14 +974,11 @@ struct ethtool_ops *xgbe_get_ethtool_ops(void);
const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
#endif
-int xgbe_mdio_register(struct xgbe_prv_data *);
-void xgbe_mdio_unregister(struct xgbe_prv_data *);
-void xgbe_dump_phy_registers(struct xgbe_prv_data *);
void xgbe_ptp_register(struct xgbe_prv_data *);
void xgbe_ptp_unregister(struct xgbe_prv_data *);
-void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
- unsigned int);
-void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
+ unsigned int, unsigned int, unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
unsigned int);
void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
void xgbe_get_all_hw_features(struct xgbe_prv_data *);
@@ -831,18 +995,6 @@ static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
#endif /* CONFIG_DEBUG_FS */
-/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_DESC_DUMP
-#define XGMAC_ENABLE_RX_DESC_DUMP
-#endif
-
-/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_PKT_DUMP
-#define XGMAC_ENABLE_RX_PKT_DUMP
-#endif
-
/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
#if 0
#define YDEBUG
@@ -852,10 +1004,8 @@ static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
/* For debug prints */
#ifdef YDEBUG
#define DBGPR(x...) pr_alert(x)
-#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
#else
#define DBGPR(x...) do { } while (0)
-#define DBGPHY_REGS(x...) do { } while (0)
#endif
#ifdef YDEBUG_MDIO
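
The XGBE_*_PROPERTY names above pair with the XGBE_SPEED_* defaults: each SerDes property is a three-entry u32 array indexed by enum xgbe_speed. A hedged sketch of how one such array could be read, with the defaults as fallback (xgbe_get_tx_amp is a hypothetical helper; the driver's actual parsing may differ):

#include <linux/property.h>

/* Sketch: fill a serdes_tx_amp[] array from the optional
 * "amd,serdes-tx-amp" property, falling back to the per-speed
 * defaults when the property is absent.
 */
static void xgbe_get_tx_amp(struct device *dev, u32 amp[XGBE_SPEEDS])
{
	if (device_property_read_u32_array(dev, XGBE_TX_AMP_PROPERTY,
					   amp, XGBE_SPEEDS)) {
		amp[XGBE_SPEED_1000]  = XGBE_SPEED_1000_TXAMP;
		amp[XGBE_SPEED_2500]  = XGBE_SPEED_2500_TXAMP;
		amp[XGBE_SPEED_10000] = XGBE_SPEED_10000_TXAMP;
	}
}
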
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
index 68be565548c0..700b5abe5de5 100644
--- a/drivers/net/ethernet/apm/xgene/Makefile
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -3,5 +3,5 @@
#
xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
- xgene_enet_main.o xgene_enet_ethtool.o
+ xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o
obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index b927021c6c40..25873d142b95 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -87,10 +87,11 @@ static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
int i;
xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
- for (i = 0; i < NUM_RING_CONFIG; i++) {
+ for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
ring->state[i]);
}
@@ -98,7 +99,7 @@ static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
- memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
+ memset(ring->state, 0, sizeof(ring->state));
xgene_enet_write_ring_state(ring);
}
@@ -141,8 +142,8 @@ static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
- struct xgene_enet_desc_ring *ring)
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+ struct xgene_enet_desc_ring *ring)
{
u32 size = ring->size;
u32 i, data;
@@ -168,7 +169,7 @@ struct xgene_enet_desc_ring *xgene_enet_setup_ring(
return ring;
}
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
u32 data;
bool is_bufpool;
@@ -186,6 +187,22 @@ out:
xgene_enet_clr_ring_state(ring);
}
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+ iowrite32(count, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state, num_msgs;
+
+ ring_state = ioread32(&cmd_base[1]);
+ num_msgs = GET_VAL(NUMMSGSINQ, ring_state);
+
+ return num_msgs;
+}
+
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
@@ -803,3 +820,12 @@ struct xgene_port_ops xgene_gport_ops = {
.cle_bypass = xgene_enet_cle_bypass,
.shutdown = xgene_gport_shutdown,
};
+
+struct xgene_ring_ops xgene_ring1_ops = {
+ .num_ring_config = NUM_RING_CONFIG,
+ .num_ring_id_shift = 6,
+ .setup = xgene_enet_setup_ring,
+ .clear = xgene_enet_clear_ring,
+ .wr_cmd = xgene_enet_wr_cmd,
+ .len = xgene_enet_ring_len,
+};
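
The new xgene_enet_ring_len() reads the second command word and extracts NUMMSGSINQ with GET_VAL(), replacing the open-coded mask-and-shift removed from xgene_enet_main.c later in this patch. A self-contained sketch of what those helpers expand to (the _POS/_LEN values here are illustrative, not taken from the header):

#include <stdint.h>

#define NUMMSGSINQ_POS	1
#define NUMMSGSINQ_LEN	16

#define CREATE_MASK(pos, len)	(((1U << (len)) - 1) << (pos))
#define GET_VAL(field, src) \
	(((src) & CREATE_MASK(field##_POS, field##_LEN)) >> field##_POS)

/* Number of messages currently queued, per the ring state word. */
static uint32_t ring_len(uint32_t ring_state)
{
	return GET_VAL(NUMMSGSINQ, ring_state);
}
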
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index d9bc89d69266..541bed056012 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -26,6 +26,7 @@
struct xgene_enet_pdata;
struct xgene_enet_stats;
+struct xgene_enet_desc_ring;
/* clears and then set bits */
static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len)
@@ -101,8 +102,8 @@ enum xgene_enet_rm {
#define BLOCK_ETH_CSR_OFFSET 0x2000
#define BLOCK_ETH_RING_IF_OFFSET 0x9000
+#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
-
#define BLOCK_ETH_MAC_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
@@ -261,6 +262,7 @@ enum xgene_enet_ring_type {
enum xgene_ring_owner {
RING_OWNER_ETH0,
+ RING_OWNER_ETH1,
RING_OWNER_CPU = 15,
RING_OWNER_INVALID
};
@@ -314,9 +316,6 @@ static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
size / WORK_DESC_SIZE;
}
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
- struct xgene_enet_desc_ring *ring);
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring);
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status);
@@ -327,5 +326,6 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
extern struct xgene_mac_ops xgene_gmac_ops;
extern struct xgene_port_ops xgene_gport_ops;
+extern struct xgene_ring_ops xgene_ring1_ops;
#endif /* __XGENE_ENET_HW_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 40d3530d7f30..1bb317532f75 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -28,6 +28,8 @@
#define RES_RING_CSR 1
#define RES_RING_CMD 2
+static const struct of_device_id xgene_enet_of_match[];
+
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
@@ -48,6 +50,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
{
struct sk_buff *skb;
struct xgene_enet_raw_desc16 *raw_desc;
+ struct xgene_enet_pdata *pdata;
struct net_device *ndev;
struct device *dev;
dma_addr_t dma_addr;
@@ -58,6 +61,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
ndev = buf_pool->ndev;
dev = ndev_to_dev(buf_pool->ndev);
+ pdata = netdev_priv(ndev);
bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
len = XGENE_ENET_MAX_MTU;
@@ -82,7 +86,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
tail = (tail + 1) & slots;
}
- iowrite32(nbuf, buf_pool->cmd);
+ pdata->ring_ops->wr_cmd(buf_pool, nbuf);
buf_pool->tail = tail;
return 0;
@@ -102,26 +106,16 @@ static u8 xgene_enet_hdr_len(const void *data)
return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
-static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
-{
- u32 __iomem *cmd_base = ring->cmd_base;
- u32 ring_state, num_msgs;
-
- ring_state = ioread32(&cmd_base[1]);
- num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);
-
- return num_msgs >> NUMMSGSINQ_POS;
-}
-
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
struct xgene_enet_raw_desc16 *raw_desc;
u32 slots = buf_pool->slots - 1;
u32 tail = buf_pool->tail;
u32 userinfo;
int i, len;
- len = xgene_enet_ring_len(buf_pool);
+ len = pdata->ring_ops->len(buf_pool);
for (i = 0; i < len; i++) {
tail = (tail - 1) & slots;
raw_desc = &buf_pool->raw_desc16[tail];
@@ -131,7 +125,7 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
}
- iowrite32(-len, buf_pool->cmd);
+ pdata->ring_ops->wr_cmd(buf_pool, -len);
buf_pool->tail = tail;
}
@@ -263,8 +257,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
u32 tx_level, cq_level;
- tx_level = xgene_enet_ring_len(tx_ring);
- cq_level = xgene_enet_ring_len(cp_ring);
+ tx_level = pdata->ring_ops->len(tx_ring);
+ cq_level = pdata->ring_ops->len(cp_ring);
if (unlikely(tx_level > pdata->tx_qcnt_hi ||
cq_level > pdata->cp_qcnt_hi)) {
netif_stop_queue(ndev);
@@ -276,7 +270,7 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- iowrite32(1, tx_ring->cmd);
+ pdata->ring_ops->wr_cmd(tx_ring, 1);
skb_tx_timestamp(skb);
tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
@@ -389,11 +383,11 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
} while (--budget);
if (likely(count)) {
- iowrite32(-count, ring->cmd);
+ pdata->ring_ops->wr_cmd(ring, -count);
ring->head = head;
if (netif_queue_stopped(ring->ndev)) {
- if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
+ if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
netif_wake_queue(ring->ndev);
}
}
@@ -510,6 +504,7 @@ static int xgene_enet_open(struct net_device *ndev)
else
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
+ netif_carrier_off(ndev);
netif_start_queue(ndev);
return ret;
@@ -545,7 +540,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
pdata = netdev_priv(ring->ndev);
dev = ndev_to_dev(ring->ndev);
- xgene_enet_clear_ring(ring);
+ pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
@@ -598,15 +593,17 @@ static int xgene_enet_get_ring_size(struct device *dev,
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
+ struct xgene_enet_pdata *pdata;
struct device *dev;
if (!ring)
return;
dev = ndev_to_dev(ring->ndev);
+ pdata = netdev_priv(ring->ndev);
if (ring->desc_addr) {
- xgene_enet_clear_ring(ring);
+ pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
devm_kfree(dev, ring);
@@ -637,6 +634,25 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
}
}
+static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ if ((pdata->enet_id == XGENE_ENET2) &&
+ (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
+ return true;
+ }
+
+ return false;
+}
+
+static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
+
+ return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
+}
+
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
struct net_device *ndev, u32 ring_num,
enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
@@ -668,9 +684,20 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
}
ring->size = size;
- ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
+ if (is_irq_mbox_required(pdata, ring)) {
+ ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
+ &ring->irq_mbox_dma, GFP_KERNEL);
+ if (!ring->irq_mbox_addr) {
+ dma_free_coherent(dev, size, ring->desc_addr,
+ ring->dma);
+ devm_kfree(dev, ring);
+ return NULL;
+ }
+ }
+
+ ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
- ring = xgene_enet_setup_ring(ring);
+ ring = pdata->ring_ops->setup(ring);
netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
ring->num, ring->size, ring->id, ring->slots);
@@ -682,12 +709,34 @@ static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
return (owner << 6) | (bufnum & GENMASK(5, 0));
}
+static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
+{
+ enum xgene_ring_owner owner;
+
+ if (p->enet_id == XGENE_ENET1) {
+ switch (p->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ owner = RING_OWNER_ETH0;
+ break;
+ default:
+ owner = (!p->port_id) ? RING_OWNER_ETH0 :
+ RING_OWNER_ETH1;
+ break;
+ }
+ } else {
+ owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
+ }
+
+ return owner;
+}
+
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
+ enum xgene_ring_owner owner;
u8 cpu_bufnum = pdata->cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
@@ -696,6 +745,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
int ret;
/* allocate rx descriptor ring */
+ owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
@@ -705,7 +755,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
}
/* allocate buffer pool for receiving packets */
- ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_2KB, ring_id);
if (!buf_pool) {
@@ -734,7 +785,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
pdata->rx_ring = rx_ring;
/* allocate tx descriptor ring */
- ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
if (!tx_ring) {
@@ -824,14 +876,21 @@ static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
int ret;
ret = device_property_read_u32(dev, "port-id", &id);
- if (!ret && id > 1) {
- dev_err(dev, "Incorrect port-id specified\n");
- return -ENODEV;
- }
- pdata->port_id = id;
+ switch (ret) {
+ case -EINVAL:
+ pdata->port_id = 0;
+ ret = 0;
+ break;
+ case 0:
+ pdata->port_id = id & BIT(0);
+ break;
+ default:
+ dev_err(dev, "Incorrect port-id specified: errno: %d\n", ret);
+ break;
+ }
- return 0;
+ return ret;
}
static int xgene_get_mac_address(struct device *dev,
@@ -876,6 +935,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
struct device *dev;
struct resource *res;
void __iomem *base_addr;
+ u32 offset;
int ret;
pdev = pdata->pdev;
@@ -962,14 +1022,20 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
pdata->clk = NULL;
}
- base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+ base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+ else
+ base_addr = pdata->base_addr;
pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
- pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+ offset = (pdata->enet_id == XGENE_ENET1) ?
+ BLOCK_ETH_MAC_CSR_OFFSET :
+ X2_BLOCK_ETH_MAC_CSR_OFFSET;
+ pdata->mcx_mac_csr_addr = base_addr + offset;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
@@ -1034,23 +1100,44 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
break;
}
- switch (pdata->port_id) {
- case 0:
- pdata->cpu_bufnum = START_CPU_BUFNUM_0;
- pdata->eth_bufnum = START_ETH_BUFNUM_0;
- pdata->bp_bufnum = START_BP_BUFNUM_0;
- pdata->ring_num = START_RING_NUM_0;
- break;
- case 1:
- pdata->cpu_bufnum = START_CPU_BUFNUM_1;
- pdata->eth_bufnum = START_ETH_BUFNUM_1;
- pdata->bp_bufnum = START_BP_BUFNUM_1;
- pdata->ring_num = START_RING_NUM_1;
- break;
- default:
- break;
+ if (pdata->enet_id == XGENE_ENET1) {
+ switch (pdata->port_id) {
+ case 0:
+ pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+ pdata->eth_bufnum = START_ETH_BUFNUM_0;
+ pdata->bp_bufnum = START_BP_BUFNUM_0;
+ pdata->ring_num = START_RING_NUM_0;
+ break;
+ case 1:
+ pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = START_BP_BUFNUM_1;
+ pdata->ring_num = START_RING_NUM_1;
+ break;
+ default:
+ break;
+ }
+ pdata->ring_ops = &xgene_ring1_ops;
+ } else {
+ switch (pdata->port_id) {
+ case 0:
+ pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+ pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+ pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+ pdata->ring_num = X2_START_RING_NUM_0;
+ break;
+ case 1:
+ pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
+ pdata->ring_num = X2_START_RING_NUM_1;
+ break;
+ default:
+ break;
+ }
+ pdata->rm = RM0;
+ pdata->ring_ops = &xgene_ring2_ops;
}
-
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
@@ -1086,6 +1173,9 @@ static int xgene_enet_probe(struct platform_device *pdev)
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
struct xgene_mac_ops *mac_ops;
+#ifdef CONFIG_OF
+ const struct of_device_id *of_id;
+#endif
int ret;
ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
@@ -1104,6 +1194,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
NETIF_F_GSO |
NETIF_F_GRO;
+#ifdef CONFIG_OF
+ of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
+ if (of_id) {
+ pdata->enet_id = (enum xgene_enet_id)of_id->data;
+ if (!pdata->enet_id) {
+ free_netdev(ndev);
+ return -ENODEV;
+ }
+ }
+#endif
+
ret = xgene_enet_get_resources(pdata);
if (ret)
goto err;
@@ -1175,9 +1276,11 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
- {.compatible = "apm,xgene-enet",},
- {.compatible = "apm,xgene1-sgenet",},
- {.compatible = "apm,xgene1-xgenet",},
+ {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+ {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
{},
};
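
Two patterns in this file are worth calling out. device_property_read_u32() returns -EINVAL when the property is simply absent, which the reworked xgene_get_port_id() folds into a default of zero while still propagating real errors; and the OF match table now tags each compatible string with the controller generation in .data. A minimal sketch of the optional-property idiom (read_u32_default is a hypothetical helper):

#include <linux/property.h>

/* Hypothetical helper: read an optional u32 property, using a default
 * when the property is absent (-EINVAL) and failing on other errors.
 */
static int read_u32_default(struct device *dev, const char *name,
			    u32 *val, u32 def)
{
	int ret = device_property_read_u32(dev, name, val);

	if (ret == -EINVAL) {
		*val = def;
		return 0;
	}
	return ret;
}
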
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 8f3d232b09bc..1c85fc87703a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -35,6 +35,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
#define XGENE_DRV_VERSION "v1.0"
#define XGENE_ENET_MAX_MTU 1536
@@ -51,12 +52,26 @@
#define START_BP_BUFNUM_1 0x2A
#define START_RING_NUM_1 264
+#define X2_START_CPU_BUFNUM_0 0
+#define X2_START_ETH_BUFNUM_0 0
+#define X2_START_BP_BUFNUM_0 0x20
+#define X2_START_RING_NUM_0 0
+#define X2_START_CPU_BUFNUM_1 0xc
+#define X2_START_ETH_BUFNUM_1 0
+#define X2_START_BP_BUFNUM_1 0x20
+#define X2_START_RING_NUM_1 256
+
#define IRQ_ID_SIZE 16
#define XGENE_MAX_TXC_RINGS 1
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
+enum xgene_enet_id {
+ XGENE_ENET1 = 1,
+ XGENE_ENET2
+};
+
/* software context of a descriptor ring */
struct xgene_enet_desc_ring {
struct net_device *ndev;
@@ -68,10 +83,12 @@ struct xgene_enet_desc_ring {
u16 irq;
char irq_name[IRQ_ID_SIZE];
u32 size;
- u32 state[NUM_RING_CONFIG];
+ u32 state[X2_NUM_RING_CONFIG];
void __iomem *cmd_base;
void __iomem *cmd;
dma_addr_t dma;
+ dma_addr_t irq_mbox_dma;
+ void *irq_mbox_addr;
u16 dst_ring_num;
u8 nbufpool;
struct sk_buff *(*rx_skb);
@@ -105,6 +122,15 @@ struct xgene_port_ops {
void (*shutdown)(struct xgene_enet_pdata *pdata);
};
+struct xgene_ring_ops {
+ u8 num_ring_config;
+ u8 num_ring_id_shift;
+ struct xgene_enet_desc_ring * (*setup)(struct xgene_enet_desc_ring *);
+ void (*clear)(struct xgene_enet_desc_ring *);
+ void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
+ u32 (*len)(struct xgene_enet_desc_ring *);
+};
+
/* ethernet private data */
struct xgene_enet_pdata {
struct net_device *ndev;
@@ -113,6 +139,7 @@ struct xgene_enet_pdata {
int phy_speed;
struct clk *clk;
struct platform_device *pdev;
+ enum xgene_enet_id enet_id;
struct xgene_enet_desc_ring *tx_ring;
struct xgene_enet_desc_ring *rx_ring;
char *dev_name;
@@ -136,6 +163,7 @@ struct xgene_enet_pdata {
struct rtnl_link_stats64 stats;
struct xgene_mac_ops *mac_ops;
struct xgene_port_ops *port_ops;
+ struct xgene_ring_ops *ring_ops;
struct delayed_work link_work;
u32 port_id;
u8 cpu_bufnum;
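
With struct xgene_ring_ops in place, every queue access in the data path dispatches through pdata->ring_ops, chosen once at probe time (xgene_ring1_ops for X-Gene 1, xgene_ring2_ops for X-Gene 2). A sketch of the call shape this enables (ring_post_buffers and ring_is_empty are illustrative wrappers, not driver functions):

#include "xgene_enet_main.h"

/* Illustrative wrappers: one call site serves both ring generations. */
static void ring_post_buffers(struct xgene_enet_pdata *pdata,
			      struct xgene_enet_desc_ring *ring, int nbuf)
{
	pdata->ring_ops->wr_cmd(ring, nbuf);	/* advance by nbuf slots */
}

static bool ring_is_empty(struct xgene_enet_pdata *pdata,
			  struct xgene_enet_desc_ring *ring)
{
	return pdata->ring_ops->len(ring) == 0;
}
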
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
new file mode 100644
index 000000000000..0b6896bb351e
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -0,0 +1,200 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
+
+static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+ u64 addr = ring->dma;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+ ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
+ ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
+ }
+ ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+
+ addr >>= 8;
+ ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
+
+ addr >>= 27;
+ ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
+ | ACCEPTLERR
+ | SET_VAL(RINGADDRH, addr);
+ ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
+ ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
+}
+
+static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+ bool is_bufpool;
+ u32 val;
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+ val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
+ ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
+ if (is_bufpool)
+ ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
+}
+
+static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+
+ ring_cfg[3] |= RECOMBBUF;
+ ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
+}
+
+static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
+ u32 offset, u32 data)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+ iowrite32(data, pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+ int i;
+
+ xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
+ for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
+ xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
+ ring->state[i]);
+ }
+}
+
+static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ memset(ring->state, 0, sizeof(ring->state));
+ xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ enum xgene_ring_owner owner;
+
+ xgene_enet_ring_set_type(ring);
+
+ owner = xgene_enet_ring_owner(ring->id);
+ if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
+ xgene_enet_ring_set_recombbuf(ring);
+
+ xgene_enet_ring_init(ring);
+ xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
+{
+ u32 ring_id_val, ring_id_buf;
+ bool is_bufpool;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
+ return;
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+
+ ring_id_val = ring->id & GENMASK(9, 0);
+ ring_id_val |= OVERWRITE;
+
+ ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
+ ring_id_buf |= PREFETCH_BUF_EN;
+ if (is_bufpool)
+ ring_id_buf |= IS_BUFFER_POOL;
+
+ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
+ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
+}
+
+static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
+{
+ u32 ring_id;
+
+ ring_id = ring->id | OVERWRITE;
+ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
+ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+ struct xgene_enet_desc_ring *ring)
+{
+ bool is_bufpool;
+ u32 addr, i;
+
+ xgene_enet_clr_ring_state(ring);
+ xgene_enet_set_ring_state(ring);
+ xgene_enet_set_ring_id(ring);
+
+ ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+ if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+ return ring;
+
+ addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
+ xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
+
+ for (i = 0; i < ring->slots; i++)
+ xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
+
+ return ring;
+}
+
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+{
+ xgene_enet_clr_desc_ring_id(ring);
+ xgene_enet_clr_ring_state(ring);
+}
+
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+ u32 data = 0;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+ data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
+ INTR_CLEAR;
+ }
+ data |= (count & GENMASK(16, 0));
+
+ iowrite32(data, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state, num_msgs;
+
+ ring_state = ioread32(&cmd_base[1]);
+ num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);
+
+ return num_msgs;
+}
+
+struct xgene_ring_ops xgene_ring2_ops = {
+ .num_ring_config = X2_NUM_RING_CONFIG,
+ .num_ring_id_shift = 13,
+ .setup = xgene_enet_setup_ring,
+ .clear = xgene_enet_clear_ring,
+ .wr_cmd = xgene_enet_wr_cmd,
+ .len = xgene_enet_ring_len,
+};
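
Unlike the v1 wr_cmd, the v2 command word carries more than the slot count: for CPU-owned rings it also encodes the interrupt line and an INTR_CLEAR bit, and the count itself is masked to 17 bits so negative decrements wrap correctly in the field. A sketch of the word being composed, assuming the driver's SET_VAL/GENMASK/RING_BUFNUM_MASK helpers from the headers above:

/* Sketch of the command word composed by xgene_enet_wr_cmd() above for
 * a CPU-owned ring: interrupt line + clear bit + 17-bit slot count.
 */
static u32 ring2_cmd_word(u16 ring_id, int count)
{
	u32 data = SET_VAL(X2_INTLINE, ring_id & RING_BUFNUM_MASK) |
		   INTR_CLEAR;

	return data | (count & GENMASK(16, 0));
}
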
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
new file mode 100644
index 000000000000..8b235db23c42
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
@@ -0,0 +1,49 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_RING2_H__
+#define __XGENE_ENET_RING2_H__
+
+#include "xgene_enet_main.h"
+
+#define X2_NUM_RING_CONFIG 6
+
+#define INTR_MBOX_SIZE 1024
+#define CSR_VMID0_INTR_MBOX 0x0270
+#define INTR_CLEAR BIT(23)
+
+#define X2_MSG_AM_POS 10
+#define X2_QBASE_AM_POS 11
+#define X2_INTLINE_POS 24
+#define X2_INTLINE_LEN 5
+#define X2_CFGCRID_POS 29
+#define X2_CFGCRID_LEN 3
+#define X2_SELTHRSH_POS 7
+#define X2_SELTHRSH_LEN 3
+#define X2_RINGTYPE_POS 23
+#define X2_RINGTYPE_LEN 2
+#define X2_DEQINTEN_POS 29
+#define X2_RECOMTIMEOUT_POS 0
+#define X2_RECOMTIMEOUT_LEN 7
+#define X2_NUMMSGSINQ_POS 0
+#define X2_NUMMSGSINQ_LEN 17
+
+extern struct xgene_ring_ops xgene_ring2_ops;
+
+#endif /* __XGENE_ENET_RING2_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f27fb6f2a93b..ff240b3cb2b8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -21,6 +21,7 @@
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
+#include "xgene_enet_xgmac.h"
static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
@@ -39,6 +40,14 @@ static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
iowrite32(val, p->eth_diag_csr_addr + offset);
}
+static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+ iowrite32(val, addr);
+}
+
static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
u32 wr_addr, u32 wr_data)
{
@@ -140,8 +149,9 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
- u32 val = 0xffffffff;
+ u32 val;
+ val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}
@@ -227,6 +237,8 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
u32 data, loop = 10;
u32 offset = p->port_id * 4;
+ u32 enet_spare_cfg_reg, rsif_config_reg;
+ u32 cfg_bypass_reg, rx_dv_gate_reg;
xgene_sgmac_reset(p);
@@ -239,7 +251,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
SGMII_STATUS_ADDR >> 2);
if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
break;
- usleep_range(10, 20);
+ usleep_range(1000, 2000);
}
if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
netdev_err(p->ndev, "Auto-negotiation failed\n");
@@ -249,33 +261,38 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
- data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
+ if (p->enet_id == XGENE_ENET1) {
+ enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
+ rsif_config_reg = RSIF_CONFIG_REG_ADDR;
+ cfg_bypass_reg = CFG_BYPASS_ADDR;
+ rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
+ } else {
+ enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
+ rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
+ cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
+ rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
+ }
+
+ data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
data |= MPA_IDLE_WITH_QMI_EMPTY;
- xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);
+ xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
xgene_sgmac_set_mac_addr(p);
- data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
- data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
- xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);
-
/* Adjust MDC clock frequency */
data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&data, 7);
xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
/* Enable drop if bufpool not available */
- data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
+ data = xgene_enet_rd_csr(p, rsif_config_reg);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
- xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);
-
- /* Rtype should be copied from FP */
- xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
+ xgene_enet_wr_csr(p, rsif_config_reg, data);
/* Bypass traffic gating */
- xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
- xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
- xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
+ xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+ xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+ xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -331,14 +348,23 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
u32 dst_ring_num, u16 bufpool_id)
{
u32 data, fpsel;
+ u32 cle_bypass_reg0, cle_bypass_reg1;
u32 offset = p->port_id * MAC_OFFSET;
+ if (p->enet_id == XGENE_ENET1) {
+ cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
+ cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
+ } else {
+ cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
+ cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
+ }
+
data = CFG_CLE_BYPASS_EN0;
- xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
+ xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
- xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
+ xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index a18a9d1f1143..27ba2fe3fca6 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -122,7 +122,6 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
return true;
}
-
static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 5a5296a6d1df..bf0a99435737 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -21,9 +21,28 @@
#ifndef __XGENE_ENET_XGMAC_H__
#define __XGENE_ENET_XGMAC_H__
+#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
+#define XGENET_CONFIG_REG_ADDR 0x20
+#define XGENET_SRST_ADDR 0x00
+#define XGENET_CLKEN_ADDR 0x08
+
+#define CSR_CLK BIT(0)
+#define XGENET_CLK BIT(1)
+#define PCS_CLK BIT(3)
+#define AN_REF_CLK BIT(4)
+#define AN_CLK BIT(5)
+#define AD_CLK BIT(6)
+
+#define CSR_RST BIT(0)
+#define XGENET_RST BIT(1)
+#define PCS_RST BIT(3)
+#define AN_REF_RST BIT(4)
+#define AN_RST BIT(5)
+#define AD_RST BIT(6)
+
#define AXGMAC_CONFIG_0 0x0000
#define AXGMAC_CONFIG_1 0x0004
#define HSTMACRST BIT(31)
@@ -38,6 +57,7 @@
#define HSTMACADR_MSW_ADDR 0x0014
#define HSTMAXFRAME_LENGTH_ADDR 0x0020
+#define XG_MCX_RX_DV_GATE_REG_0_ADDR 0x0004
#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 783543ad1fcf..909ad7a0d480 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -456,6 +456,67 @@ static int bcm_sysport_set_wol(struct net_device *dev,
return 0;
}
+static int bcm_sysport_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
+
+ ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
+ ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
+
+ reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+
+ ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
+ ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
+
+ return 0;
+}
+
+static int bcm_sysport_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+
+ /* The base system clock is 125 MHz; the DMA timeout is this reference
+ * clock divided by 1024, which yields a tick of roughly 8.192 us. Our
+ * maximum value has to fit in the RING_TIMEOUT_MASK (16 bits).
+ */
+ if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
+ ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
+ ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
+ ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
+ (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+ return -EINVAL;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
+ reg &= ~(RING_INTR_THRESH_MASK |
+ RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
+ reg |= ec->tx_max_coalesced_frames;
+ reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
+ RING_TIMEOUT_SHIFT;
+ tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
+ }
+
+ reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+ reg &= ~(RDMA_INTR_THRESH_MASK |
+ RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
+ reg |= ec->rx_max_coalesced_frames;
+ reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
+ RDMA_TIMEOUT_SHIFT;
+ rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+
+ return 0;
+}
+
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
dev_kfree_skb_any(cb->skb);
@@ -463,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
dma_unmap_addr_set(cb, dma_addr, 0);
}
-static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
- struct bcm_sysport_cb *cb)
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb)
{
struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev;
+ struct sk_buff *skb, *rx_skb;
dma_addr_t mapping;
- int ret;
- cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
- if (!cb->skb) {
+ /* Allocate a new SKB for a new packet */
+ skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ if (!skb) {
+ priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
- return -ENOMEM;
+ return NULL;
}
- mapping = dma_map_single(kdev, cb->skb->data,
+ mapping = dma_map_single(kdev, skb->data,
RX_BUF_LENGTH, DMA_FROM_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
+ if (dma_mapping_error(kdev, mapping)) {
priv->mib.rx_dma_failed++;
- bcm_sysport_free_cb(cb);
+ dev_kfree_skb_any(skb);
netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
- return ret;
+ return NULL;
}
- dma_unmap_addr_set(cb, dma_addr, mapping);
- dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+ /* Grab the current SKB on the ring */
+ rx_skb = cb->skb;
+ if (likely(rx_skb))
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
- priv->rx_bd_assign_index++;
- priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
- priv->rx_bd_assign_ptr = priv->rx_bds +
- (priv->rx_bd_assign_index * DESC_SIZE);
+ /* Put the new SKB on the ring */
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_desc_set_addr(priv, cb->bd_addr, mapping);
netif_dbg(priv, rx_status, ndev, "RX refill\n");
- return 0;
+ /* Return the current SKB to the caller */
+ return rx_skb;
}
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
struct bcm_sysport_cb *cb;
- int ret = 0;
+ struct sk_buff *skb;
unsigned int i;
for (i = 0; i < priv->num_rx_bds; i++) {
- cb = &priv->rx_cbs[priv->rx_bd_assign_index];
- if (cb->skb)
- continue;
-
- ret = bcm_sysport_rx_refill(priv, cb);
- if (ret)
- break;
+ cb = &priv->rx_cbs[i];
+ skb = bcm_sysport_rx_refill(priv, cb);
+ if (skb)
+ dev_kfree_skb(skb);
+ if (!cb->skb)
+ return -ENOMEM;
}
- return ret;
+ return 0;
}
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int budget)
{
- struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev;
unsigned int processed = 0, to_process;
struct bcm_sysport_cb *cb;
@@ -531,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int p_index;
u16 len, status;
struct bcm_rsb *rsb;
- int ret;
/* Determine how much we should process since last call */
p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -549,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
while ((processed < to_process) && (processed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr];
- skb = cb->skb;
+ skb = bcm_sysport_rx_refill(priv, cb);
- processed++;
- priv->rx_read_ptr++;
-
- if (priv->rx_read_ptr == priv->num_rx_bds)
- priv->rx_read_ptr = 0;
/* We do not have a backing SKB, so we do not have a corresponding
* DMA mapping for this incoming packet since
@@ -566,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
netif_err(priv, rx_err, ndev, "out of memory!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- goto refill;
+ goto next;
}
- dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
- RX_BUF_LENGTH, DMA_FROM_DEVICE);
-
/* Extract the Receive Status Block prepended */
rsb = (struct bcm_rsb *)skb->data;
len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
@@ -583,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
p_index, priv->rx_c_index, priv->rx_read_ptr,
len, status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, ndev, "oversized packet\n");
+ ndev->stats.rx_length_errors++;
+ ndev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
netif_err(priv, rx_status, ndev, "fragmented packet!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- bcm_sysport_free_cb(cb);
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
}
if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
@@ -597,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_over_errors++;
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- bcm_sysport_free_cb(cb);
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
}
skb_put(skb, len);
@@ -625,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_bytes += len;
napi_gro_receive(&priv->napi, skb);
-refill:
- ret = bcm_sysport_rx_refill(priv, cb);
- if (ret)
- priv->mib.alloc_rx_buff_failed++;
+next:
+ processed++;
+ priv->rx_read_ptr++;
+
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
}
return processed;
@@ -1269,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
+ struct bcm_sysport_cb *cb;
u32 reg;
int ret;
+ int i;
/* Initialize SW view of the RX ring */
priv->num_rx_bds = NUM_RX_DESC;
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
- priv->rx_bd_assign_ptr = priv->rx_bds;
- priv->rx_bd_assign_index = 0;
priv->rx_c_index = 0;
priv->rx_read_ptr = 0;
priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
@@ -1286,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
return -ENOMEM;
}
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = priv->rx_cbs + i;
+ cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
+ }
+
ret = bcm_sysport_alloc_rx_bufs(priv);
if (ret) {
netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
@@ -1641,6 +1711,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
.get_sset_count = bcm_sysport_get_sset_count,
.get_wol = bcm_sysport_get_wol,
.set_wol = bcm_sysport_set_wol,
+ .get_coalesce = bcm_sysport_get_coalesce,
+ .set_coalesce = bcm_sysport_set_coalesce,
};
static const struct net_device_ops bcm_sysport_netdev_ops = {
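
The coalescing conversions above all hinge on that 8.192 us tick: microseconds become ticks with DIV_ROUND_UP(usecs * 1000, 8192), and ticks read back as ticks * 8192 / 1000. A standalone round-trip sketch (plain C; the mask value is from bcmsysport.h):

#include <stdio.h>

#define RING_TIMEOUT_MASK	0xffff	/* 16-bit tick field */

/* 125 MHz / 1024 = one tick per ~8.192 us. */
static unsigned int usecs_to_ticks(unsigned int usecs)
{
	return (usecs * 1000 + 8191) / 8192;	/* DIV_ROUND_UP */
}

static unsigned int ticks_to_usecs(unsigned int ticks)
{
	return ticks * 8192 / 1000;
}

int main(void)
{
	unsigned int t = usecs_to_ticks(100);

	/* 100 us rounds up to 13 ticks and reads back as 106 us */
	printf("%u ticks, %u us, max %u us\n",
	       t, ticks_to_usecs(t), ticks_to_usecs(RING_TIMEOUT_MASK));
	return 0;
}
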
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index e2c043eabbf3..f28bf545d7f4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -292,7 +292,7 @@ struct bcm_rsb {
#define RDMA_END_ADDR_LO 0x102c
#define RDMA_MBDONE_INTR 0x1030
-#define RDMA_INTR_THRESH_MASK 0xff
+#define RDMA_INTR_THRESH_MASK 0x1ff
#define RDMA_TIMEOUT_SHIFT 16
#define RDMA_TIMEOUT_MASK 0xffff
@@ -663,8 +663,6 @@ struct bcm_sysport_priv {
/* Receive queue */
void __iomem *rx_bds;
- void __iomem *rx_bd_assign_ptr;
- unsigned int rx_bd_assign_index;
struct bcm_sysport_cb *rx_cbs;
unsigned int num_rx_bds;
unsigned int rx_read_ptr;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 1f82a04ce01a..7a4aaa3c01b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -357,6 +357,7 @@ struct sw_tx_bd {
struct sw_rx_page {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
+ unsigned int offset;
};
union db_prod {
@@ -381,9 +382,10 @@ union db_prod {
#define PAGES_PER_SGE_SHIFT 0
#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE PAGE_SIZE
-#define SGE_PAGE_SHIFT PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGE_SHIFT 12
+#define SGE_PAGE_SIZE (1 << SGE_PAGE_SHIFT)
+#define SGE_PAGE_MASK (~(SGE_PAGE_SIZE - 1))
+#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)
#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
SGE_PAGES), 0xffff)
@@ -526,6 +528,12 @@ enum bnx2x_tpa_mode_t {
TPA_MODE_GRO
};
+struct bnx2x_alloc_pool {
+ struct page *page;
+ dma_addr_t dma;
+ unsigned int offset;
+};
+
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
@@ -599,6 +607,8 @@ struct bnx2x_fastpath {
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
+
+ struct bnx2x_alloc_pool page_pool;
};
#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec56a9b65dc3..e2a65334708d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -544,30 +544,49 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+ struct bnx2x_alloc_pool *pool = &fp->page_pool;
dma_addr_t mapping;
- if (unlikely(page == NULL)) {
- BNX2X_ERR("Can't alloc sge\n");
- return -ENOMEM;
- }
+ if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
- mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGES, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- __free_pages(page, PAGES_PER_SGE_SHIFT);
- BNX2X_ERR("Can't map sge\n");
- return -ENOMEM;
+ /* Put the page reference held by the memory pool, since we
+ * won't be using this page as the mempool anymore.
+ */
+ if (pool->page)
+ put_page(pool->page);
+
+ pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+ if (unlikely(!pool->page)) {
+ BNX2X_ERR("Can't alloc sge\n");
+ return -ENOMEM;
+ }
+
+ pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev,
+ pool->dma))) {
+ __free_pages(pool->page, PAGES_PER_SGE_SHIFT);
+ pool->page = NULL;
+ BNX2X_ERR("Can't map sge\n");
+ return -ENOMEM;
+ }
+ pool->offset = 0;
}
- sw_buf->page = page;
+ get_page(pool->page);
+ sw_buf->page = pool->page;
+ sw_buf->offset = pool->offset;
+
+ mapping = pool->dma + sw_buf->offset;
dma_unmap_addr_set(sw_buf, mapping, mapping);
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+ pool->offset += SGE_PAGE_SIZE;
+
return 0;
}
@@ -629,20 +648,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return err;
}
- /* Unmap the page as we're going to pass it to the stack */
- dma_unmap_page(&bp->pdev->dev,
- dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
if (fp->mode == TPA_MODE_LRO)
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ skb_fill_page_desc(skb, j, old_rx_pg.page,
+ old_rx_pg.offset, frag_len);
else { /* GRO */
int rem;
int offset = 0;
for (rem = frag_len; rem > 0; rem -= gro_size) {
int len = rem > gro_size ? gro_size : rem;
skb_fill_page_desc(skb, frag_id++,
- old_rx_pg.page, offset, len);
+ old_rx_pg.page,
+ old_rx_pg.offset + offset,
+ len);
if (offset)
get_page(old_rx_pg.page);
offset += len;
@@ -662,7 +683,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
if (fp->rx_frag_size)
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d7a71758e876..2b30081ec26d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -804,9 +804,13 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
if (!page)
return;
- dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
- __free_pages(page, PAGES_PER_SGE_SHIFT);
+ /* Since many fragments can share the same page, make sure to
+ * only unmap and free the page once.
+ */
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ put_page(page);
sw_buf->page = NULL;
sge->addr_hi = 0;
@@ -964,6 +968,25 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
((u8 *)fw_lo)[1] = mac[4];
}
+static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+ struct bnx2x_alloc_pool *pool)
+{
+ if (!pool->page)
+ return;
+
+ /* Page was not fully fragmented. Unmap unused space */
+ if (pool->offset < PAGE_SIZE) {
+ dma_addr_t dma = pool->dma + pool->offset;
+ int size = PAGE_SIZE - pool->offset;
+
+ dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE);
+ }
+
+ put_page(pool->page);
+
+ pool->page = NULL;
+}
+
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last)
{
@@ -974,6 +997,8 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
for (i = 0; i < last; i++)
bnx2x_free_rx_sge(bp, fp, i);
+
+ bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6043734ea613..b43b2cb9b830 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2770,12 +2770,79 @@ static int bcmgenet_close(struct net_device *dev)
return ret;
}
+static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = ring->priv;
+ u32 p_index, c_index, intsts, intmsk;
+ struct netdev_queue *txq;
+ unsigned int free_bds;
+ unsigned long flags;
+ bool txq_stopped;
+
+ if (!netif_msg_tx_err(priv))
+ return;
+
+ txq = netdev_get_tx_queue(priv->dev, ring->queue);
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (ring->index == DESC_INDEX) {
+ intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
+ } else {
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
+ }
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+ p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
+ txq_stopped = netif_tx_queue_stopped(txq);
+ free_bds = ring->free_bds;
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
+ "TX queue status: %s, interrupts: %s\n"
+ "(sw)free_bds: %d (sw)size: %d\n"
+ "(sw)p_index: %d (hw)p_index: %d\n"
+ "(sw)c_index: %d (hw)c_index: %d\n"
+ "(sw)clean_p: %d (sw)write_p: %d\n"
+ "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
+ ring->index, ring->queue,
+ txq_stopped ? "stopped" : "active",
+ intsts & intmsk ? "enabled" : "disabled",
+ free_bds, ring->size,
+ ring->prod_index, p_index & DMA_P_INDEX_MASK,
+ ring->c_index, c_index & DMA_C_INDEX_MASK,
+ ring->clean_ptr, ring->write_ptr,
+ ring->cb_ptr, ring->end_ptr);
+}
+
static void bcmgenet_timeout(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 int0_enable = 0;
+ u32 int1_enable = 0;
+ unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+ bcmgenet_disable_tx_napi(priv);
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++)
+ bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
+ bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
+
+ bcmgenet_tx_reclaim_all(dev);
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++)
+ int1_enable |= (1 << q);
+
+ int0_enable = UMAC_IRQ_TXDMA_DONE;
+
+ /* Re-enable TX interrupts if disabled */
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+
+ bcmgenet_enable_tx_napi(priv);
+
dev->trans_start = jiffies;
dev->stats.tx_errors++;
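
The new timeout handler quiesces TX NAPI, dumps and reclaims every ring, then clears the interrupt masks: one bit per priority ring in the INTRL2_1 block plus TXDMA_DONE in INTRL2_0 for the default ring. A sketch of the mask rebuild; the queue count and the TXDMA_DONE bit position are assumed for illustration, not taken from the header:

#include <stdio.h>
#include <stdint.h>

#define UMAC_IRQ_TXDMA_DONE	(1u << 16)	/* assumed bit position */

int main(void)
{
	unsigned int tx_queues = 4;	/* priority TX rings */
	uint32_t int0_enable, int1_enable = 0;
	unsigned int q;

	for (q = 0; q < tx_queues; q++)
		int1_enable |= 1u << q;		/* ring q's done bit */
	int0_enable = UMAC_IRQ_TXDMA_DONE;	/* default ring */

	printf("INTRL2_0 mask-clear: 0x%08x\n", int0_enable);
	printf("INTRL2_1 mask-clear: 0x%08x\n", int1_enable);
	return 0;
}
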
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 069952fa5d64..73c934cf6c61 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6618,7 +6618,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
static void tg3_frag_free(bool is_frag, void *data)
{
if (is_frag)
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index fc646a41d548..740d04fd2223 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -54,6 +54,8 @@
#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+#define GEM_MTU_MIN_SIZE 68
+
/*
* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -785,7 +787,7 @@ static int gem_rx(struct macb *bp, int budget)
}
/* now everything is ready for receiving packet */
bp->rx_skbuff[entry] = NULL;
- len = MACB_BFEXT(RX_FRMLEN, ctrl);
+ len = ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -831,7 +833,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
struct macb_dma_desc *desc;
desc = macb_rx_desc(bp, last_frag);
- len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
+ len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
macb_rx_ring_wrap(first_frag),
@@ -1651,7 +1653,10 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
- config |= MACB_BIT(BIG); /* Receive oversized frames */
+ if (bp->caps & MACB_CAPS_JUMBO)
+ config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
+ else
+ config |= MACB_BIT(BIG); /* Receive oversized frames */
if (bp->dev->flags & IFF_PROMISC)
config |= MACB_BIT(CAF); /* Copy All Frames */
else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
@@ -1660,8 +1665,13 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BIT(NBC); /* No BroadCast */
config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
+ if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+ gem_writel(bp, JML, bp->jumbo_max_len);
bp->speed = SPEED_10;
bp->duplex = DUPLEX_HALF;
+ bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
+ if (bp->caps & MACB_CAPS_JUMBO)
+ bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
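
With MACB_CAPS_JUMBO the received length no longer fits the 12-bit FRMLEN field, so the driver now selects a 12- or 14-bit mask once in macb_init_hw() and applies it to the descriptor control word in both receive paths. The extraction in isolation (the descriptor value is made up):

#include <stdio.h>
#include <stdint.h>

#define MACB_RX_FRMLEN_MASK	0xFFF	/* 12-bit length, standard */
#define MACB_RX_JFRMLEN_MASK	0x3FFF	/* 14-bit length, jumbo */

static unsigned int rx_len(uint32_t ctrl, int jumbo)
{
	return ctrl & (jumbo ? MACB_RX_JFRMLEN_MASK : MACB_RX_FRMLEN_MASK);
}

int main(void)
{
	uint32_t ctrl = 0x80002400;	/* illustrative control word */

	printf("len, standard mask: %u\n", rx_len(ctrl, 0));	/* 1024 */
	printf("len, jumbo mask:    %u\n", rx_len(ctrl, 1));	/* 9216 */
	return 0;
}
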
@@ -1865,6 +1875,26 @@ static int macb_close(struct net_device *dev)
return 0;
}
+static int macb_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct macb *bp = netdev_priv(dev);
+ u32 max_mtu;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ max_mtu = ETH_DATA_LEN;
+ if (bp->caps & MACB_CAPS_JUMBO)
+ max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+
+ if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
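
On jumbo-capable GEMs the MTU ceiling comes from the programmed Jumbo Max Length register; otherwise it stays at the standard Ethernet payload size. A sketch of the same bounds check, with the JML value passed in as a plain number instead of being read back from the hardware:

#include <stdio.h>

#define ETH_HLEN		14
#define ETH_FCS_LEN		4
#define ETH_DATA_LEN		1500
#define GEM_MTU_MIN_SIZE	68

static int mtu_valid(int new_mtu, int jumbo, unsigned int jml)
{
	unsigned int max_mtu = jumbo ? jml - ETH_HLEN - ETH_FCS_LEN
				     : ETH_DATA_LEN;

	return new_mtu >= GEM_MTU_MIN_SIZE && new_mtu <= (int)max_mtu;
}

int main(void)
{
	/* zynqmp programs JML to 10240, leaving room for a 10222 MTU */
	printf("9000 with JML 10240: %d\n", mtu_valid(9000, 1, 10240));
	printf("9000 on a plain GEM: %d\n", mtu_valid(9000, 0, 0));
	return 0;
}
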
static void gem_update_stats(struct macb *bp)
{
int i;
@@ -2141,7 +2171,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_get_stats = macb_get_stats,
.ndo_do_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = macb_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = macb_poll_controller,
@@ -2702,6 +2732,16 @@ static const struct macb_config emac_config = {
.init = at91ether_init,
};
+
+static const struct macb_config zynqmp_config = {
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .jumbo_max_len = 10240,
+};
+
static const struct macb_config zynq_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_NO_GIGABIT_HALF,
@@ -2720,6 +2760,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
+ { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
{ /* sentinel */ }
};
@@ -2789,6 +2830,10 @@ static int macb_probe(struct platform_device *pdev)
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
+ if (macb_config->jumbo_max_len)
+ bp->jumbo_max_len = macb_config->jumbo_max_len;
+
spin_lock_init(&bp->lock);
/* setup capabilities */
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 24b1d9bcd865..d74655993d4b 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -71,6 +71,7 @@
#define GEM_NCFGR 0x0004 /* Network Config */
#define GEM_USRIO 0x000c /* User IO */
#define GEM_DMACFG 0x0010 /* DMA Configuration */
+#define GEM_JML 0x0048 /* Jumbo Max Length */
#define GEM_HRB 0x0080 /* Hash Bottom */
#define GEM_HRT 0x0084 /* Hash Top */
#define GEM_SA1B 0x0088 /* Specific1 Bottom */
@@ -398,6 +399,7 @@
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
+#define MACB_CAPS_JUMBO 0x00000008
/* Bit manipulation macros */
#define MACB_BIT(name) \
@@ -515,6 +517,9 @@ struct macb_dma_desc {
#define MACB_RX_BROADCAST_OFFSET 31
#define MACB_RX_BROADCAST_SIZE 1
+#define MACB_RX_FRMLEN_MASK 0xFFF
+#define MACB_RX_JFRMLEN_MASK 0x3FFF
+
/* RX checksum offload disabled: bit 24 clear in NCFGR */
#define GEM_RX_TYPEID_MATCH_OFFSET 22
#define GEM_RX_TYPEID_MATCH_SIZE 2
@@ -758,6 +763,7 @@ struct macb_config {
int (*clk_init)(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk);
int (*init)(struct platform_device *pdev);
+ int jumbo_max_len;
};
struct macb_queue {
@@ -827,6 +833,9 @@ struct macb {
unsigned int max_tx_length;
u64 ethtool_stats[GEM_STATS_LEN];
+
+ unsigned int rx_frm_len_mask;
+ unsigned int jumbo_max_len;
};
static inline bool macb_is_gem(struct macb *bp)
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
new file mode 100644
index 000000000000..fc3d8e3ee807
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -0,0 +1,40 @@
+#
+# Cavium ethernet device configuration
+#
+
+config NET_VENDOR_CAVIUM
+ tristate "Cavium ethernet drivers"
+ depends on PCI && 64BIT
+ ---help---
+ Enable support for the Cavium ThunderX Network Interface
+ Controller (NIC). The NIC provides the controller and DMA
+ engines to move network traffic to and from memory. The NIC
+ works closely with the TNS, BGX and SerDes blocks to replace
+ and virtualize the functions of a typical standalone PCIe
+ NIC chip.
+
+ If you have a Cavium Thunder board, say Y.
+
+if NET_VENDOR_CAVIUM
+
+config THUNDER_NIC_PF
+ tristate "Thunder Physical function driver"
+ default NET_VENDOR_CAVIUM
+ select THUNDER_NIC_BGX
+ ---help---
+ This driver supports Thunder's NIC physical function.
+
+config THUNDER_NIC_VF
+ tristate "Thunder Virtual function driver"
+ default NET_VENDOR_CAVIUM
+ ---help---
+ This driver supports Thunder's NIC virtual function.
+
+config THUNDER_NIC_BGX
+ tristate "Thunder MAC interface driver (BGX)"
+ default NET_VENDOR_CAVIUM
+ ---help---
+ This driver supports programming and controlling the MAC
+ interfaces (BGX) on behalf of the NIC physical function driver.
+
+endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
new file mode 100644
index 000000000000..7aac4780d050
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
new file mode 100644
index 000000000000..5c4615ccaa14
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Cavium's Thunder ethernet device
+#
+
+obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
+obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
+obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
+
+nicpf-y := nic_main.o
+nicvf-y := nicvf_main.o nicvf_queues.o
+nicvf-y += nicvf_ethtool.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
new file mode 100644
index 000000000000..a3b43e50a576
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_H
+#define NIC_H
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include "thunder_bgx.h"
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
+#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
+#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
+#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+#define PCI_MSIX_REG_BAR_NUM 4
+
+/* NIC SRIOV VF count */
+#define MAX_NUM_VFS_SUPPORTED 128
+#define DEFAULT_NUM_VF_ENABLED 8
+
+#define NIC_TNS_BYPASS_MODE 0
+#define NIC_TNS_MODE 1
+
+/* NIC priv flags */
+#define NIC_SRIOV_ENABLED BIT(0)
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS 64
+#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
+
+/* Max pkinds */
+#define NIC_MAX_PKIND 16
+
+/* Rx channel configuration in TNS bypass mode:
+ * BGX0-LMAC0-CHAN0 - VNIC CHAN0
+ * BGX0-LMAC1-CHAN0 - VNIC CHAN16
+ * ...
+ * BGX1-LMAC0-CHAN0 - VNIC CHAN128
+ * ...
+ * BGX1-LMAC3-CHAN0 - VNIC CHAN176
+ */
+#define NIC_INTF_COUNT 2 /* Interfaces between VNIC and TNS/BGX */
+#define NIC_CHANS_PER_INF 128
+#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
+#define NIC_CPI_COUNT 2048 /* Number of channel parse indices */
+
+/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
+#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
+#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
+#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
+#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
+
+/* Tx scheduling */
+#define NIC_MAX_TL4 1024
+#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */
+#define NIC_MAX_TL3 256
+#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */
+#define NIC_MAX_TL2 64
+#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */
+#define NIC_MAX_TL1 2
+
+/* TNS bypass mode */
+#define NIC_TL2_PER_BGX 32
+#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX)
+#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+
+/* NIC VF Interrupts */
+#define NICVF_INTR_CQ 0
+#define NICVF_INTR_SQ 1
+#define NICVF_INTR_RBDR 2
+#define NICVF_INTR_PKT_DROP 3
+#define NICVF_INTR_TCP_TIMER 4
+#define NICVF_INTR_MBOX 5
+#define NICVF_INTR_QS_ERR 6
+
+#define NICVF_INTR_CQ_SHIFT 0
+#define NICVF_INTR_SQ_SHIFT 8
+#define NICVF_INTR_RBDR_SHIFT 16
+#define NICVF_INTR_PKT_DROP_SHIFT 20
+#define NICVF_INTR_TCP_TIMER_SHIFT 21
+#define NICVF_INTR_MBOX_SHIFT 22
+#define NICVF_INTR_QS_ERR_SHIFT 23
+
+#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
+
+/* MSI-X interrupts */
+#define NIC_PF_MSIX_VECTORS 10
+#define NIC_VF_MSIX_VECTORS 20
+
+#define NIC_PF_INTR_ID_ECC0_SBE 0
+#define NIC_PF_INTR_ID_ECC0_DBE 1
+#define NIC_PF_INTR_ID_ECC1_SBE 2
+#define NIC_PF_INTR_ID_ECC1_DBE 3
+#define NIC_PF_INTR_ID_ECC2_SBE 4
+#define NIC_PF_INTR_ID_ECC2_DBE 5
+#define NIC_PF_INTR_ID_ECC3_SBE 6
+#define NIC_PF_INTR_ID_ECC3_DBE 7
+#define NIC_PF_INTR_ID_MBOX0 8
+#define NIC_PF_INTR_ID_MBOX1 9
+
+/* Global timer for CQ timer thresh interrupts.
+ * Calculated for an SCLK of 700MHz; the value written should be
+ * 1/16th of what is expected.
+ *
+ * 1 tick per 0.05usec gives a value of 2.2; the ~10% lost by
+ * rounding down to 2 is absorbed by the CQ timer thresh value.
+ */
+#define NICPF_CLK_PER_INT_TICK 2
+
+struct nicvf_cq_poll {
+ u8 cq_idx; /* Completion queue index */
+ struct napi_struct napi;
+};
+
+#define NIC_RSSI_COUNT 4096 /* Total number of RSS indices */
+#define NIC_MAX_RSS_HASH_BITS 8
+#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
+
+struct nicvf_rss_info {
+ bool enable;
+#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
+#define RSS_IP_HASH_ENA BIT(1)
+#define RSS_TCP_HASH_ENA BIT(2)
+#define RSS_TCP_SYN_DIS BIT(3)
+#define RSS_UDP_HASH_ENA BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
+#define RSS_ROCE_ENA BIT(6)
+#define RSS_L3_BI_DIRECTION_ENA BIT(7)
+#define RSS_L4_BI_DIRECTION_ENA BIT(8)
+ u64 cfg;
+ u8 hash_bits;
+ u16 rss_size;
+ u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ u64 key[RSS_HASH_KEY_SIZE];
+} ____cacheline_aligned_in_smp;
+
+enum rx_stats_reg_offset {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_RED = 0x4,
+ RX_RED_OCTS = 0x5,
+ RX_ORUN = 0x6,
+ RX_ORUN_OCTS = 0x7,
+ RX_FCS = 0x8,
+ RX_L2ERR = 0x9,
+ RX_DRP_BCAST = 0xa,
+ RX_DRP_MCAST = 0xb,
+ RX_DRP_L3BCAST = 0xc,
+ RX_DRP_L3MCAST = 0xd,
+ RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+ TX_OCTS = 0x0,
+ TX_UCAST = 0x1,
+ TX_BCAST = 0x2,
+ TX_MCAST = 0x3,
+ TX_DROP = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+ u64 rx_bytes_ok;
+ u64 rx_ucast_frames_ok;
+ u64 rx_bcast_frames_ok;
+ u64 rx_mcast_frames_ok;
+ u64 rx_fcs_errors;
+ u64 rx_l2_errors;
+ u64 rx_drop_red;
+ u64 rx_drop_red_bytes;
+ u64 rx_drop_overrun;
+ u64 rx_drop_overrun_bytes;
+ u64 rx_drop_bcast;
+ u64 rx_drop_mcast;
+ u64 rx_drop_l3_bcast;
+ u64 rx_drop_l3_mcast;
+ u64 tx_bytes_ok;
+ u64 tx_ucast_frames_ok;
+ u64 tx_bcast_frames_ok;
+ u64 tx_mcast_frames_ok;
+ u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+ /* Rx */
+ u64 rx_frames_ok;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_jumbo;
+ u64 rx_drops;
+ /* Tx */
+ u64 tx_frames_ok;
+ u64 tx_drops;
+ u64 tx_busy;
+ u64 tx_tso;
+};
+
+struct nicvf {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ u8 vf_id;
+ u8 node;
+ u8 tns_mode;
+ u16 mtu;
+ struct queue_set *qs;
+ void __iomem *reg_base;
+ bool link_up;
+ u8 duplex;
+ u32 speed;
+ struct page *rb_page;
+ u32 rb_page_offset;
+ bool rb_alloc_fail;
+ bool rb_work_scheduled;
+ struct delayed_work rbdr_work;
+ struct tasklet_struct rbdr_task;
+ struct tasklet_struct qs_err_task;
+ struct tasklet_struct cq_task;
+ struct nicvf_cq_poll *napi[8];
+ struct nicvf_rss_info rss_info;
+ u8 cpi_alg;
+ /* Interrupt coalescing settings */
+ u32 cq_coalesce_usecs;
+
+ u32 msg_enable;
+ struct nicvf_hw_stats stats;
+ struct nicvf_drv_stats drv_stats;
+ struct bgx_stats bgx_stats;
+ struct work_struct reset_task;
+
+ /* MSI-X */
+ bool msix_enabled;
+ u8 num_vec;
+ struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
+ char irq_name[NIC_VF_MSIX_VECTORS][20];
+ bool irq_allocated[NIC_VF_MSIX_VECTORS];
+
+ bool pf_ready_to_rcv_msg;
+ bool pf_acked;
+ bool pf_nacked;
+ bool bgx_stats_acked;
+} ____cacheline_aligned_in_smp;
+
+/* PF <--> VF Mailbox communication
+ * Two 64bit registers are shared between PF and VF.
+ * Separate set for each VF.
+ * Writing the second word of a message triggers the mailbox
+ * interrupt (MBOX1 on first-revision silicon, MBOX0 on later
+ * revisions).
+ */
+#define NIC_PF_VF_MAILBOX_SIZE 2
+#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
+
+/* Mailbox message types */
+#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
+#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
+#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
+#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
+#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
+#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
+#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
+#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
+#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
+#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
+#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
+#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
+#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
+#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
+#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
+#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
+#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
+#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */
+
+struct nic_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 tns_mode;
+ u8 node_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+ u8 msg;
+ u8 num;
+ u64 cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 rq_num;
+ u64 cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 sq_num;
+ u64 cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rq_cnt;
+ u8 cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 hash_bits;
+ u8 tbl_len;
+ u8 tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
+ u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+struct bgx_stats_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rx;
+ u8 idx;
+ u64 stats;
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+ u8 msg;
+ u8 link_up;
+ u8 duplex;
+ u32 speed;
+};
+
+/* 128 bit shared memory between PF and each VF */
+union nic_mbx {
+ struct { u8 msg; } msg;
+ struct nic_cfg_msg nic_cfg;
+ struct qs_cfg_msg qs;
+ struct rq_cfg_msg rq;
+ struct sq_cfg_msg sq;
+ struct set_mac_msg mac;
+ struct set_frs_msg frs;
+ struct cpi_cfg_msg cpi_cfg;
+ struct rss_sz_msg rss_size;
+ struct rss_cfg_msg rss_cfg;
+ struct bgx_stats_msg bgx_stats;
+ struct bgx_link_status link_status;
+};
+
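
Every message type overlays the same shared area, which is why nic_probe() below asserts sizeof(union nic_mbx) <= 16: a whole message must fit the two 64-bit mailbox registers. A cut-down sketch of composing a message and flattening it into the two words that nic_send_msg_to_vf() writes; the u64[2] member is added here for illustration, the driver casts the union instead:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NIC_MBOX_MSG_SET_MAX_FRS 0x09

struct set_frs_msg {		/* trimmed copy of the header's struct */
	uint8_t msg;
	uint8_t vf_id;
	uint16_t max_frs;
};

union nic_mbx {
	struct { uint8_t msg; } msg;	/* first byte = message type */
	struct set_frs_msg frs;
	uint64_t u64[2];		/* the two mailbox registers */
};

int main(void)
{
	union nic_mbx mbx;

	memset(&mbx, 0, sizeof(mbx));
	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.vf_id = 3;
	mbx.frs.max_frs = 9000;

	printf("mbx word0=0x%016llx word1=0x%016llx type=0x%02x\n",
	       (unsigned long long)mbx.u64[0],
	       (unsigned long long)mbx.u64[1], mbx.msg.msg);
	return 0;
}
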
+#define NIC_NODE_ID_MASK 0x03
+#define NIC_NODE_ID_SHIFT 44
+
+static inline int nic_get_node_id(struct pci_dev *pdev)
+{
+ u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
+ return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues);
+int nicvf_open(struct net_device *netdev);
+int nicvf_stop(struct net_device *netdev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_config_rss(struct nicvf *nic);
+void nicvf_set_rss_key(struct nicvf *nic);
+void nicvf_set_ethtool_ops(struct net_device *netdev);
+void nicvf_update_stats(struct nicvf *nic);
+void nicvf_update_lmac_stats(struct nicvf *nic);
+
+#endif /* NIC_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
new file mode 100644
index 000000000000..6e0c03169a55
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -0,0 +1,932 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nic"
+#define DRV_VERSION "1.0"
+
+struct nicpf {
+ struct pci_dev *pdev;
+ u8 rev_id;
+ u8 node;
+ unsigned int flags;
+ u8 num_vf_en; /* Number of VFs enabled */
+ bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
+ void __iomem *reg_base; /* Register start address */
+ struct pkind_cfg pkind;
+#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
+#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
+#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
+ u8 vf_lmac_map[MAX_LMAC];
+ struct delayed_work dwork;
+ struct workqueue_struct *check_link;
+ u8 link[MAX_LMAC];
+ u8 duplex[MAX_LMAC];
+ u32 speed[MAX_LMAC];
+ u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
+ u16 rss_ind_tbl_size;
+ bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
+
+ /* MSI-X */
+ bool msix_enabled;
+ u8 num_vec;
+ struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS];
+ bool irq_allocated[NIC_PF_MSIX_VECTORS];
+};
+
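
vf_lmac_map[] packs each VF's BGX index into the high nibble and its LMAC index into the low nibble, exactly as the three macros above do. Standalone:

#include <stdio.h>

#define SET_MAP(bgx, lmac)	((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
#define MAP_BGX(map)		(((map) >> 4) & 0xF)
#define MAP_LMAC(map)		((map) & 0xF)

int main(void)
{
	unsigned char map = SET_MAP(1, 3);	/* VF behind BGX1, LMAC3 */

	printf("map=0x%02x -> bgx=%d lmac=%d\n",
	       map, MAP_BGX(map), MAP_LMAC(map));
	return 0;
}
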
+/* Supported devices */
+static const struct pci_device_id nic_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nic_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operation which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
+
+/* PF -> VF mailbox communication APIs */
+static void nic_enable_mbx_intr(struct nicpf *nic)
+{
+ /* Enable mailbox interrupt for all 128 VFs */
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+}
+
+static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
+{
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
+}
+
+static u64 nic_get_mbx_addr(int vf)
+{
+ return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
+
+/* Send a mailbox message to VF
+ * @vf: VF to which this message is to be sent
+ * @mbx: message to be sent
+ */
+static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
+{
+ void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
+ u64 *msg = (u64 *)mbx;
+
+ /* In first-revision HW the mbox interrupt is triggered when
+ * the PF writes to MBOX(1); in later revisions, when the PF
+ * writes to MBOX(0).
+ */
+ if (nic->rev_id == 0) {
+ /* see the comment for nic_reg_write()/nic_reg_read()
+ * functions above
+ */
+ writeq_relaxed(msg[0], mbx_addr);
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ } else {
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ writeq_relaxed(msg[0], mbx_addr);
+ }
+}
+
+/* Responds to VF's READY message with the VF's
+ * ID, node, MAC address, etc.
+ * @vf: VF which sent the READY message
+ */
+static void nic_mbx_send_ready(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ int bgx_idx, lmac;
+ const char *mac;
+
+ mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
+ mbx.nic_cfg.vf_id = vf;
+
+ mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+ if (mac)
+ ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+
+ mbx.nic_cfg.node_id = nic->node;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* ACKs VF's mailbox message
+ * @vf: VF to which the ACK is to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_ACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs VF's mailbox message to indicate that the PF is not
+ * able to complete the requested action
+ * @vf: VF to which the NACK is to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_NACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Flush all in flight receive packets to memory and
+ * bring down an active RQ
+ */
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+ u16 timeout = ~0x00;
+
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+ /* Wait till sync cycle is finished */
+ while (timeout) {
+ if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+ break;
+ timeout--;
+ }
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+ if (!timeout) {
 dev_err(&nic->pdev->dev, "Receive queue software sync failed\n");
+ return 1;
+ }
+ return 0;
+}
+
+/* Get BGX Rx/Tx stats and respond to VF's request */
+static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
+{
+ int bgx_idx, lmac;
+ union nic_mbx mbx = {};
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = bgx->vf_id;
+ mbx.bgx_stats.rx = bgx->rx;
+ mbx.bgx_stats.idx = bgx->idx;
+ if (bgx->rx)
+ mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ else
+ mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
+}
+
+/* Update hardware min/max frame size */
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
+{
+ if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
+ dev_err(&nic->pdev->dev,
+ "Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
+ vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+ return 1;
+ }
+ new_frs += ETH_HLEN;
+ if (new_frs <= nic->pkind.maxlen)
+ return 0;
+
+ nic->pkind.maxlen = new_frs;
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+ return 0;
+}
+
+/* Set minimum transmit packet size */
+static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
+{
+ int lmac;
+ u64 lmac_cfg;
+
+ /* Max value that can be set is 60 */
+ if (size > 60)
+ size = 60;
+
+ for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+ lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
+ lmac_cfg &= ~(0xF << 2);
+ lmac_cfg |= ((size / 4) << 2);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
+ }
+}
+
+/* Check the number of LMACs present and set the VF::LMAC mapping.
+ * The mapping is used while initializing channels.
+ */
+static void nic_set_lmac_vf_mapping(struct nicpf *nic)
+{
+ unsigned int bgx_map = bgx_get_map(nic->node);
+ int bgx, next_bgx_lmac = 0;
+ int lmac, lmac_cnt = 0;
+ u64 lmac_credit;
+
+ nic->num_vf_en = 0;
+
+ for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+ if (!(bgx_map & (1 << bgx)))
+ continue;
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ for (lmac = 0; lmac < lmac_cnt; lmac++)
+ nic->vf_lmac_map[next_bgx_lmac++] =
+ NIC_SET_VF_LMAC_MAP(bgx, lmac);
+ nic->num_vf_en += lmac_cnt;
+
+ /* Program LMAC credits */
+ lmac_credit = (1ull << 1); /* channel credit enable */
+ lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
+ /* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
+ lmac_credit |= (((((48 * 1024) / lmac_cnt) -
+ NIC_HW_MAX_FRS) / 16) << 12);
+ lmac = bgx * MAX_LMAC_PER_BGX;
+ for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
+ nic_reg_write(nic,
+ NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
+ lmac_credit);
+ }
+}
+
+#define BGX0_BLOCK 8
+#define BGX1_BLOCK 9
+
+static void nic_init_hw(struct nicpf *nic)
+{
+ int i;
+
+ /* Reset NIC, in case the driver is repeatedly inserted and removed */
+ nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
+
+ /* Enable NIC HW block */
+ nic_reg_write(nic, NIC_PF_CFG, 0x3);
+
+ /* Enable backpressure */
+ nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
+
+ /* Disable TNS mode on both interfaces */
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+ (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+ (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
+ (1ULL << 63) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
+ (1ULL << 63) | BGX1_BLOCK);
+
+ /* PKIND configuration */
+ nic->pkind.minlen = 0;
+ nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+ nic->pkind.lenerr_en = 1;
+ nic->pkind.rx_hdr = 0;
+ nic->pkind.hdr_sl = 0;
+
+ for (i = 0; i < NIC_MAX_PKIND; i++)
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
+ *(u64 *)&nic->pkind);
+
+ nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
+
+ /* Timer config */
+ nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+}
+
+/* Channel parse index configuration */
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
+{
+ u32 vnic, bgx, lmac, chan;
+ u32 padd, cpi_count = 0;
+ u64 cpi_base, cpi, rssi_base, rssi;
+ u8 qset, rq_idx = 0;
+
+ vnic = cfg->vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+
+ chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+ cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
+ rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+
+ /* Rx channel configuration */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
+ (1ull << 63) | (vnic << 0));
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
+ ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
+
+ if (cfg->cpi_alg == CPI_ALG_NONE)
+ cpi_count = 1;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
+ cpi_count = 8;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
+ cpi_count = 16;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6 bits DSCP */
+ cpi_count = NIC_MAX_CPI_PER_LMAC;
+
+ /* RSS Qset, Qidx mapping */
+ qset = cfg->vf_id;
+ rssi = rssi_base;
+ for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | rq_idx);
+ rq_idx++;
+ }
+
+ rssi = 0;
+ cpi = cpi_base;
+ for (; cpi < (cpi_base + cpi_count); cpi++) {
+ /* Determine port to channel adder */
+ if (cfg->cpi_alg != CPI_ALG_DIFF)
+ padd = cpi % cpi_count;
+ else
+ padd = cpi % 8; /* 3 bits CS out of 6 bits DSCP */
+
+ /* Leave RSS_SIZE as '0' to disable RSS */
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (padd << 16) | (rssi_base + rssi));
+
+ if ((rssi + 1) >= cfg->rq_cnt)
+ continue;
+
+ if (cfg->cpi_alg == CPI_ALG_VLAN)
+ rssi++;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16)
+ rssi = ((cpi - cpi_base) & 0xe) >> 1;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF)
+ rssi = ((cpi - cpi_base) & 0x38) >> 3;
+ }
+ nic->cpi_base[cfg->vf_id] = cpi_base;
+}
+
+/* Responds to VF with its RSS indirection table size */
+static void nic_send_rss_size(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Receive side scaling configuration
+ * Configures:
+ * - RSS index
+ * - indirection table, i.e. the hash::RQ mapping
+ * - number of hash bits to consider
+ */
+static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
+{
+ u8 qset, idx = 0;
+ u64 cpi_cfg, cpi_base, rssi_base, rssi;
+
+ cpi_base = nic->cpi_base[cfg->vf_id];
+ cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
+ rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
+
+ rssi = rssi_base;
+ qset = cfg->vf_id;
+
+ for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
+ idx++;
+ }
+
+ cpi_cfg &= ~(0xFULL << 20);
+ cpi_cfg |= (cfg->hash_bits << 20);
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
+}
+
+/* 4-level transmit side scheduler configuration
+ * for TNS bypass mode
+ *
+ * Sample configuration for SQ0
+ * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
+ * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
+ */
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+{
+ u32 bgx, lmac, chan;
+ u32 tl2, tl3, tl4;
+ u32 rr_quantum;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ /* 24 bytes for FCS, IPG and preamble */
+ rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
+
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ tl4 += sq_idx;
+ tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+ nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
+ ((u64)vnic << NIC_QS_ID_SHIFT) |
+ ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
+ nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
+ ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
+
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
+ chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+ /* Enable backpressure on the channel */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
+
+ tl2 = tl3 >> 2;
+ nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
+ nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
+ /* No priorities as of now */
+ nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+}
+
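
Plugging the constants from nic.h into the index arithmetic above reproduces the sample mapping in the function comment, with TL1 simply following the BGX. A standalone check; the 4-LMACs-per-BGX assumption matches the sample table, not necessarily every board:

#include <stdio.h>

#define NIC_MAX_TL4		1024
#define NIC_MAX_TL3		256
#define NIC_MAX_BGX		2
#define NIC_CHANS_PER_INF	128
#define NIC_TL4_PER_BGX		(NIC_MAX_TL4 / NIC_MAX_BGX)
#define NIC_TL4_PER_LMAC	(NIC_MAX_TL4 / NIC_CHANS_PER_INF)

int main(void)
{
	unsigned int vnic, bgx, lmac, tl4, tl3, tl2, sq_idx = 0;

	for (vnic = 0; vnic < 8; vnic++) {
		bgx = vnic / 4;			/* assumed VF->BGX map */
		lmac = vnic % 4;
		tl4 = (lmac * NIC_TL4_PER_LMAC) +
		      (bgx * NIC_TL4_PER_BGX) + sq_idx;
		tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
		tl2 = tl3 >> 2;
		printf("VNIC%u-SQ0 -> TL4(%u) -> TL3[%u] -> TL2[%u] -> TL1[%u]\n",
		       vnic, tl4, tl3, tl2, bgx);
	}
	return 0;
}
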
+/* Handle mailbox messages received from VFs */
+static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ u64 reg_addr;
+ int bgx, lmac;
+ int i;
+ int ret = 0;
+
+ nic->mbx_lock[vf] = true;
+
+ mbx_addr = nic_get_mbx_addr(vf);
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nic_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+ __func__, mbx.msg.msg, vf);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic_mbx_send_ready(nic, vf);
+ nic->link[vf] = 0;
+ nic->duplex[vf] = 0;
+ nic->speed[vf] = 0;
+ ret = 1;
+ break;
+ case NIC_MBOX_MSG_QS_CFG:
+ reg_addr = NIC_PF_QSET_0_127_CFG |
+ (mbx.qs.num << NIC_QS_ID_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_BP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_SW_SYNC:
+ ret = nic_rcv_queue_sw_sync(nic);
+ break;
+ case NIC_MBOX_MSG_RQ_DROP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_SQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
+ (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.sq.cfg);
+ nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+ break;
+ case NIC_MBOX_MSG_SET_MAC:
+ lmac = mbx.mac.vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
+ break;
+ case NIC_MBOX_MSG_SET_MAX_FRS:
+ ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
+ mbx.frs.vf_id);
+ break;
+ case NIC_MBOX_MSG_CPI_CFG:
+ nic_config_cpi(nic, &mbx.cpi_cfg);
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic_send_rss_size(nic, vf);
+ goto unlock;
+ case NIC_MBOX_MSG_RSS_CFG:
+ case NIC_MBOX_MSG_RSS_CFG_CONT:
+ nic_config_rss(nic, &mbx.rss_cfg);
+ break;
+ case NIC_MBOX_MSG_CFG_DONE:
+ /* Last message of VF config msg sequence */
+ nic->vf_enabled[vf] = true;
+ goto unlock;
+ case NIC_MBOX_MSG_SHUTDOWN:
+ /* First msg in VF teardown sequence */
+ nic->vf_enabled[vf] = false;
+ break;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nic_get_bgx_stats(nic, &mbx.bgx_stats);
+ goto unlock;
+ default:
+ dev_err(&nic->pdev->dev,
+ "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+ break;
+ }
+
+ if (!ret)
+ nic_mbx_send_ack(nic, vf);
+ else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+ nic_mbx_send_nack(nic, vf);
+unlock:
+ nic->mbx_lock[vf] = false;
+}
+
+static void nic_mbx_intr_handler(struct nicpf *nic, int mbx)
+{
+ u64 intr;
+ u8 vf, vf_per_mbx_reg = 64;
+
+ intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
+ dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
+ for (vf = 0; vf < vf_per_mbx_reg; vf++) {
+ if (intr & (1ULL << vf)) {
+ dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
+ vf + (mbx * vf_per_mbx_reg));
+ if ((vf + (mbx * vf_per_mbx_reg)) >= nic->num_vf_en)
+ break;
+ nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
+ nic_clear_mbx_intr(nic, vf, mbx);
+ }
+ }
+}
+
+static irqreturn_t nic_mbx0_intr_handler(int irq, void *nic_irq)
+{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+
+ nic_mbx_intr_handler(nic, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nic_mbx1_intr_handler(int irq, void *nic_irq)
+{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+
+ nic_mbx_intr_handler(nic, 1);
+
+ return IRQ_HANDLED;
+}
+
+static int nic_enable_msix(struct nicpf *nic)
+{
+ int i, ret;
+
+ nic->num_vec = NIC_PF_MSIX_VECTORS;
+
+ for (i = 0; i < nic->num_vec; i++)
+ nic->msix_entries[i].entry = i;
+
+ ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+ if (ret) {
+ dev_err(&nic->pdev->dev,
+ "Request for #%d msix vectors failed\n",
+ nic->num_vec);
+ return ret;
+ }
+
+ nic->msix_enabled = 1;
+ return 0;
+}
+
+static void nic_disable_msix(struct nicpf *nic)
+{
+ if (nic->msix_enabled) {
+ pci_disable_msix(nic->pdev);
+ nic->msix_enabled = 0;
+ nic->num_vec = 0;
+ }
+}
+
+static void nic_free_all_interrupts(struct nicpf *nic)
+{
+ int irq;
+
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (nic->irq_allocated[irq])
+ free_irq(nic->msix_entries[irq].vector, nic);
+ nic->irq_allocated[irq] = false;
+ }
+}
+
+static int nic_register_interrupts(struct nicpf *nic)
+{
+ int ret;
+
+ /* Enable MSI-X */
+ ret = nic_enable_msix(nic);
+ if (ret)
+ return ret;
+
+ /* Register mailbox interrupt handlers */
+ ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
+ nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
+ if (ret)
+ goto fail;
+
+ nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
+
+ ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
+ nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
+ if (ret)
+ goto fail;
+
+ nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+
+ /* Enable mailbox interrupt */
+ nic_enable_mbx_intr(nic);
+ return 0;
+
+fail:
+ dev_err(&nic->pdev->dev, "Request irq failed\n");
+ nic_free_all_interrupts(nic);
+ return ret;
+}
+
+static void nic_unregister_interrupts(struct nicpf *nic)
+{
+ nic_free_all_interrupts(nic);
+ nic_disable_msix(nic);
+}
+
+static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
+{
+ int pos = 0;
+ int err;
+ u16 total_vf_cnt;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
+ return -ENODEV;
+ }
+
+ pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
+ if (total_vf_cnt < nic->num_vf_en)
+ nic->num_vf_en = total_vf_cnt;
+
+ if (!total_vf_cnt)
+ return 0;
+
+ err = pci_enable_sriov(pdev, nic->num_vf_en);
+ if (err) {
+ dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
+ nic->num_vf_en);
+ nic->num_vf_en = 0;
+ return err;
+ }
+
+ dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
+ nic->num_vf_en);
+
+ nic->flags |= NIC_SRIOV_ENABLED;
+ return 0;
+}
+
+/* Poll for BGX LMAC link status and notify the corresponding VF
+ * on any change. This is valid only when the internal L2 switch
+ * is not present; otherwise the VF link is always treated as up.
+ */
+static void nic_poll_for_link(struct work_struct *work)
+{
+ union nic_mbx mbx = {};
+ struct nicpf *nic;
+ struct bgx_link_status link;
+ u8 vf, bgx, lmac;
+
+ nic = container_of(work, struct nicpf, dwork.work);
+
+ mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+ for (vf = 0; vf < nic->num_vf_en; vf++) {
+ /* Poll only if VF is UP */
+ if (!nic->vf_enabled[vf])
+ continue;
+
+ /* Get BGX, LMAC indices for the VF */
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ /* Get interface link status */
+ bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+ /* Inform VF only if link status changed */
+ if (nic->link[vf] == link.link_up)
+ continue;
+
+ if (!nic->mbx_lock[vf]) {
+ nic->link[vf] = link.link_up;
+ nic->duplex[vf] = link.duplex;
+ nic->speed[vf] = link.speed;
+
+ /* Send a mbox message to VF with current link status */
+ mbx.link_status.link_up = link.link_up;
+ mbx.link_status.duplex = link.duplex;
+ mbx.link_status.speed = link.speed;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+ }
+ }
+ queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
+}
+
+static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct nicpf *nic;
+ int err;
+
+ BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
+
+ nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
+ if (!nic)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, nic);
+
+ nic->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ /* Map PF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
+
+ nic->node = nic_get_node_id(pdev);
+
+ nic_set_lmac_vf_mapping(nic);
+
+ /* Initialize hardware */
+ nic_init_hw(nic);
+
+ /* Set RSS TBL size for each VF */
+ nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+
+ /* Register interrupts */
+ err = nic_register_interrupts(nic);
+ if (err)
+ goto err_release_regions;
+
+ /* Configure SRIOV */
+ err = nic_sriov_init(pdev, nic);
+ if (err)
+ goto err_unregister_interrupts;
+
+ /* Register a physical link status poll fn() */
+ nic->check_link = alloc_workqueue("check_link_status",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!nic->check_link) {
+ err = -ENOMEM;
+ goto err_disable_sriov;
+ }
+
+ INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
+ queue_delayed_work(nic->check_link, &nic->dwork, 0);
+
+ return 0;
+
+err_disable_sriov:
+ if (nic->flags & NIC_SRIOV_ENABLED)
+ pci_disable_sriov(pdev);
+err_unregister_interrupts:
+ nic_unregister_interrupts(nic);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void nic_remove(struct pci_dev *pdev)
+{
+ struct nicpf *nic = pci_get_drvdata(pdev);
+
+ if (nic->flags & NIC_SRIOV_ENABLED)
+ pci_disable_sriov(pdev);
+
+ if (nic->check_link) {
+ /* Destroy the link-status workqueue */
+ cancel_delayed_work(&nic->dwork);
+ flush_workqueue(nic->check_link);
+ destroy_workqueue(nic->check_link);
+ }
+
+ nic_unregister_interrupts(nic);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver nic_driver = {
+ .name = DRV_NAME,
+ .id_table = nic_id_table,
+ .probe = nic_probe,
+ .remove = nic_remove,
+};
+
+static int __init nic_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&nic_driver);
+}
+
+static void __exit nic_cleanup_module(void)
+{
+ pci_unregister_driver(&nic_driver);
+}
+
+module_init(nic_init_module);
+module_exit(nic_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
new file mode 100644
index 000000000000..58197bb2f805
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_REG_H
+#define NIC_REG_H
+
+#define NIC_PF_REG_COUNT 29573
+#define NIC_VF_REG_COUNT 249
+
+/* Physical function register offsets */
+#define NIC_PF_CFG (0x0000)
+#define NIC_PF_STATUS (0x0010)
+#define NIC_PF_INTR_TIMER_CFG (0x0030)
+#define NIC_PF_BIST_STATUS (0x0040)
+#define NIC_PF_SOFT_RESET (0x0050)
+#define NIC_PF_TCP_TIMER (0x0060)
+#define NIC_PF_BP_CFG (0x0080)
+#define NIC_PF_RRM_CFG (0x0088)
+#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CNM_CF (0x00A8)
+#define NIC_PF_CNM_STATUS (0x00B0)
+#define NIC_PF_CQ_AVG_CFG (0x00C0)
+#define NIC_PF_RRM_AVG_CFG (0x00C8)
+#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
+#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
+#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
+#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
+#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
+#define NIC_PF_MAILBOX_INT (0x0410)
+#define NIC_PF_MAILBOX_INT_W1S (0x0430)
+#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
+#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
+#define NIC_PF_RX_ETYPE_0_7 (0x0500)
+#define NIC_PF_PKIND_0_15_CFG (0x0600)
+#define NIC_PF_ECC0_FLIP0 (0x1000)
+#define NIC_PF_ECC1_FLIP0 (0x1008)
+#define NIC_PF_ECC2_FLIP0 (0x1010)
+#define NIC_PF_ECC3_FLIP0 (0x1018)
+#define NIC_PF_ECC0_FLIP1 (0x1080)
+#define NIC_PF_ECC1_FLIP1 (0x1088)
+#define NIC_PF_ECC2_FLIP1 (0x1090)
+#define NIC_PF_ECC3_FLIP1 (0x1098)
+#define NIC_PF_ECC0_CDIS (0x1100)
+#define NIC_PF_ECC1_CDIS (0x1108)
+#define NIC_PF_ECC2_CDIS (0x1110)
+#define NIC_PF_ECC3_CDIS (0x1118)
+#define NIC_PF_BIST0_STATUS (0x1280)
+#define NIC_PF_BIST1_STATUS (0x1288)
+#define NIC_PF_BIST2_STATUS (0x1290)
+#define NIC_PF_BIST3_STATUS (0x1298)
+#define NIC_PF_ECC0_SBE_INT (0x2000)
+#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
+#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
+#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
+#define NIC_PF_ECC0_DBE_INT (0x2100)
+#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
+#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
+#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
+#define NIC_PF_ECC1_SBE_INT (0x2200)
+#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
+#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
+#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
+#define NIC_PF_ECC1_DBE_INT (0x2300)
+#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
+#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
+#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
+#define NIC_PF_ECC2_SBE_INT (0x2400)
+#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
+#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
+#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
+#define NIC_PF_ECC2_DBE_INT (0x2500)
+#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
+#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
+#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
+#define NIC_PF_ECC3_SBE_INT (0x2600)
+#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
+#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
+#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
+#define NIC_PF_ECC3_DBE_INT (0x2700)
+#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
+#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
+#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_CPI_0_2047_CFG (0x200000)
+#define NIC_PF_RSSI_0_4097_RQ (0x220000)
+#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
+#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
+#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
+#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
+#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
+#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
+#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
+#define NIC_PF_SW_SYNC_RX (0x490000)
+#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
+#define NIC_PF_TL2_0_63_CFG (0x500000)
+#define NIC_PF_TL2_0_63_PRI (0x520000)
+#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
+#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
+#define NIC_PF_TL3_0_255_CFG (0x600000)
+#define NIC_PF_TL3_0_255_CHAN (0x620000)
+#define NIC_PF_TL3_0_255_PIR (0x640000)
+#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
+#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
+#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
+#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
+#define NIC_PF_TL4_0_1023_CFG (0x800000)
+#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
+#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
+#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
+#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
+#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
+#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
+#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
+#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
+#define NIC_PF_QSET_0_127_CFG (0x20010000)
+#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
+#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
+#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
+#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
+#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
+
+#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
+#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
+#define NIC_PF_MSIX_PBA_0 (0x0F0000)
+
+/* Virtual function register offsets */
+#define NIC_VNIC_CFG (0x000020)
+#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
+#define NIC_VF_INT (0x000200)
+#define NIC_VF_INT_W1S (0x000220)
+#define NIC_VF_ENA_W1C (0x000240)
+#define NIC_VF_ENA_W1S (0x000260)
+
+#define NIC_VNIC_RSS_CFG (0x0020E0)
+#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
+#define NIC_VNIC_TX_STAT_0_4 (0x004000)
+#define NIC_VNIC_RX_STAT_0_13 (0x004100)
+#define NIC_QSET_RQ_GEN_CFG (0x010010)
+
+#define NIC_QSET_CQ_0_7_CFG (0x010400)
+#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
+#define NIC_QSET_CQ_0_7_THRESH (0x010410)
+#define NIC_QSET_CQ_0_7_BASE (0x010420)
+#define NIC_QSET_CQ_0_7_HEAD (0x010428)
+#define NIC_QSET_CQ_0_7_TAIL (0x010430)
+#define NIC_QSET_CQ_0_7_DOOR (0x010438)
+#define NIC_QSET_CQ_0_7_STATUS (0x010440)
+#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
+#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
+
+#define NIC_QSET_RQ_0_7_CFG (0x010600)
+#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
+
+#define NIC_QSET_SQ_0_7_CFG (0x010800)
+#define NIC_QSET_SQ_0_7_THRESH (0x010810)
+#define NIC_QSET_SQ_0_7_BASE (0x010820)
+#define NIC_QSET_SQ_0_7_HEAD (0x010828)
+#define NIC_QSET_SQ_0_7_TAIL (0x010830)
+#define NIC_QSET_SQ_0_7_DOOR (0x010838)
+#define NIC_QSET_SQ_0_7_STATUS (0x010840)
+#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
+#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
+#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
+
+#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
+#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
+#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
+#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
+#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
+#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
+#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
+#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
+#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
+
+#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
+#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
+#define NIC_VF_MSIX_PBA (0x0F0000)
+
+/* Offsets within registers */
+#define NIC_MSIX_VEC_SHIFT 4
+#define NIC_Q_NUM_SHIFT 18
+#define NIC_QS_ID_SHIFT 21
+#define NIC_VF_NUM_SHIFT 21
+
+/* Port kind configuration register */
+struct pkind_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63:22;
+ u64 hdr_sl:5; /* Header skip length */
+ u64 rx_hdr:3; /* TNS Receive header present */
+ u64 lenerr_en:1;/* L2 length error check enable */
+ u64 reserved_32_32:1;
+ u64 maxlen:16; /* Max frame size */
+ u64 minlen:16; /* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 minlen:16;
+ u64 maxlen:16;
+ u64 reserved_32_32:1;
+ u64 lenerr_en:1;
+ u64 rx_hdr:3;
+ u64 hdr_sl:5;
+ u64 reserved_42_63:22;
+#endif
+};
+
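
nic_init_hw() and nic_update_hw_frs() write this structure to NIC_PF_PKIND_0_15_CFG by punning it to a u64 (*(u64 *)&nic->pkind), so the bitfield ordering above must mirror the register layout under either endianness. A little-endian-only sketch of the round trip, with memcpy standing in for the register write:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct pkind_cfg {		/* little-endian branch only */
	uint64_t minlen:16;
	uint64_t maxlen:16;
	uint64_t reserved_32_32:1;
	uint64_t lenerr_en:1;
	uint64_t rx_hdr:3;
	uint64_t hdr_sl:5;
	uint64_t reserved_42_63:22;
};

int main(void)
{
	struct pkind_cfg pkind = { 0 };
	uint64_t reg;

	pkind.maxlen = 9200 + 14;	/* NIC_HW_MAX_FRS + ETH_HLEN */
	pkind.lenerr_en = 1;

	memcpy(&reg, &pkind, sizeof(reg));
	printf("PKIND_CFG word: 0x%016llx\n", (unsigned long long)reg);
	return 0;
}
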
+#endif /* NIC_REG_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
new file mode 100644
index 000000000000..16bd2d772db9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/* ETHTOOL support for the VNIC_VF device */
+
+#include <linux/pci.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nicvf"
+#define DRV_VERSION "1.0"
+
+struct nicvf_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+#define NICVF_HW_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
+}
+
+#define NICVF_DRV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
+}
+
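Both stat macros turn a field name into an index into the corresponding stats structure viewed as a flat array of u64 counters, which is valid only while every member of those structures is a plain u64. A minimal sketch with a hypothetical struct (illustrative only, not driver code):

	struct example_stats {
		u64 a;
		u64 b;
		u64 c;
	};
	#define EX_STAT_IDX(field) \
		(offsetof(struct example_stats, field) / sizeof(u64))

	struct example_stats s = { .a = 1, .b = 2, .c = 3 };
	u64 val = ((u64 *)&s)[EX_STAT_IDX(b)];	/* val == 2 */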
+static const struct nicvf_stat nicvf_hw_stats[] = {
+ NICVF_HW_STAT(rx_bytes_ok),
+ NICVF_HW_STAT(rx_ucast_frames_ok),
+ NICVF_HW_STAT(rx_bcast_frames_ok),
+ NICVF_HW_STAT(rx_mcast_frames_ok),
+ NICVF_HW_STAT(rx_fcs_errors),
+ NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(rx_drop_red),
+ NICVF_HW_STAT(rx_drop_red_bytes),
+ NICVF_HW_STAT(rx_drop_overrun),
+ NICVF_HW_STAT(rx_drop_overrun_bytes),
+ NICVF_HW_STAT(rx_drop_bcast),
+ NICVF_HW_STAT(rx_drop_mcast),
+ NICVF_HW_STAT(rx_drop_l3_bcast),
+ NICVF_HW_STAT(rx_drop_l3_mcast),
+ NICVF_HW_STAT(tx_bytes_ok),
+ NICVF_HW_STAT(tx_ucast_frames_ok),
+ NICVF_HW_STAT(tx_bcast_frames_ok),
+ NICVF_HW_STAT(tx_mcast_frames_ok),
+};
+
+static const struct nicvf_stat nicvf_drv_stats[] = {
+ NICVF_DRV_STAT(rx_frames_ok),
+ NICVF_DRV_STAT(rx_frames_64),
+ NICVF_DRV_STAT(rx_frames_127),
+ NICVF_DRV_STAT(rx_frames_255),
+ NICVF_DRV_STAT(rx_frames_511),
+ NICVF_DRV_STAT(rx_frames_1023),
+ NICVF_DRV_STAT(rx_frames_1518),
+ NICVF_DRV_STAT(rx_frames_jumbo),
+ NICVF_DRV_STAT(rx_drops),
+ NICVF_DRV_STAT(tx_frames_ok),
+ NICVF_DRV_STAT(tx_busy),
+ NICVF_DRV_STAT(tx_tso),
+ NICVF_DRV_STAT(tx_drops),
+};
+
+static const struct nicvf_stat nicvf_queue_stats[] = {
+ { "bytes", 0 },
+ { "frames", 1 },
+};
+
+static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
+static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
+static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
+
+static int nicvf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ cmd->supported = 0;
+ cmd->transceiver = XCVR_EXTERNAL;
+ if (nic->speed <= 1000) {
+ cmd->port = PORT_MII;
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->port = PORT_FIBRE;
+ cmd->autoneg = AUTONEG_DISABLE;
+ }
+ cmd->duplex = nic->duplex;
+ ethtool_cmd_speed_set(cmd, nic->speed);
+
+ return 0;
+}
+
+static void nicvf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+}
+
+static u32 nicvf_get_msglevel(struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ return nic->msg_enable;
+}
+
+static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ nic->msg_enable = lvl;
+}
+
+static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ int stats, qidx;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
+ memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
+ memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(data, "rxq%d: %s", qidx,
+ nicvf_queue_stats[stats].name);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(data, "txq%d: %s", qidx,
+ nicvf_queue_stats[stats].name);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static int nicvf_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ return nicvf_n_hw_stats + nicvf_n_drv_stats +
+ (nicvf_n_queue_stats *
+ (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+ BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
+}
+
+static void nicvf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int stat, qidx;
+
+ nicvf_update_stats(nic);
+
+ /* Update LMAC stats */
+ nicvf_update_lmac_stats(nic);
+
+ for (stat = 0; stat < nicvf_n_hw_stats; stat++)
+ *(data++) = ((u64 *)&nic->stats)
+ [nicvf_hw_stats[stat].index];
+ for (stat = 0; stat < nicvf_n_drv_stats; stat++)
+ *(data++) = ((u64 *)&nic->drv_stats)
+ [nicvf_drv_stats[stat].index];
+
+ for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.rx_stats[stat];
+ for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.tx_stats[stat];
+}
+
+static int nicvf_get_regs_len(struct net_device *dev)
+{
+ return sizeof(u64) * NIC_VF_REG_COUNT;
+}
+
+static void nicvf_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *reg)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ u64 *p = (u64 *)reg;
+ u64 reg_offset;
+ int mbox, key, stat, q;
+ int i = 0;
+
+ regs->version = 0;
+ memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));
+
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
+ /* Mailbox registers */
+ for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+ for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
+
+ /* Tx/Rx statistics */
+ for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VNIC_TX_STAT_0_4 | (stat << 3));
+
+ for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VNIC_RX_STAT_0_13 | (stat << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
+
+ /* All completion queue's registers */
+ for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
+ }
+
+ /* All receive queue's registers */
+ for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS0, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS1, q);
+ reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+}
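Consecutive STATn registers sit 8 bytes apart, which is why the second RQ/SQ stat word above is addressed by OR-ing (1 << 3) into the STAT_0_1 base. Worked arithmetic using the defines from nic_reg.h:

	/* NIC_QSET_RQ_0_7_STAT_0_1 == 0x010700  (STAT0)
	 * 0x010700 | (1 << 3)      == 0x010708  (STAT1)
	 */
	u64 stat1 = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);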
+
+static int nicvf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
+ return 0;
+}
+
+static void nicvf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+
+ ring->rx_max_pending = MAX_RCV_BUF_COUNT;
+ ring->rx_pending = qs->rbdr_len;
+ ring->tx_max_pending = MAX_SND_QUEUE_LEN;
+ ring->tx_pending = qs->sq_len;
+}
+
+static int nicvf_get_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ info->data = 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through: L4 flows also hash on IP addresses */
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nicvf_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = nic->qs->rq_cnt;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return nicvf_get_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int nicvf_set_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+ if (!rss->enable) {
+ netdev_err(nic->netdev,
+ "RSS is disabled, hash cannot be set\n");
+ return -EIO;
+ }
+
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ info->flow_type, info->data);
+
+ if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_UDP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_UDP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_L4ETC);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rss_cfg = RSS_HASH_IP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
+ return 0;
+}
+
+static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return nicvf_set_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_SIZE * sizeof(u64);
+}
+
+static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ return nic->rss_info.rss_size;
+}
+
+static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss->ind_tbl[idx];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, u8 hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
+ rss->enable = false;
+ rss->hash_bits = 0;
+ return -EIO;
+ }
+
+ /* We do not allow change in unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ rss->enable = true;
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = indir[idx];
+ }
+
+ if (hkey) {
+ memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+ nicvf_set_rss_key(nic);
+ }
+
+ nicvf_config_rss(nic);
+ return 0;
+}
+
+/* Get the number of queues the device supports and the current counts */
+static void nicvf_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ memset(channel, 0, sizeof(*channel));
+
+ channel->max_rx = MAX_RCV_QUEUES_PER_QS;
+ channel->max_tx = MAX_SND_QUEUES_PER_QS;
+
+ channel->rx_count = nic->qs->rq_cnt;
+ channel->tx_count = nic->qs->sq_cnt;
+}
+
+/* Set the number of Tx and Rx queues to be used */
+static int nicvf_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int err = 0;
+
+ if (!channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+ if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+ return -EINVAL;
+ if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+ return -EINVAL;
+
+ nic->qs->rq_cnt = channel->rx_count;
+ nic->qs->sq_cnt = channel->tx_count;
+ nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+ err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+ if (err)
+ return err;
+
+ if (!netif_running(dev))
+ return err;
+
+ nicvf_stop(dev);
+ nicvf_open(dev);
+ netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
+ nic->qs->sq_cnt, nic->qs->rq_cnt);
+
+ return err;
+}
+
+static const struct ethtool_ops nicvf_ethtool_ops = {
+ .get_settings = nicvf_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = nicvf_get_drvinfo,
+ .get_msglevel = nicvf_get_msglevel,
+ .set_msglevel = nicvf_set_msglevel,
+ .get_strings = nicvf_get_strings,
+ .get_sset_count = nicvf_get_sset_count,
+ .get_ethtool_stats = nicvf_get_ethtool_stats,
+ .get_regs_len = nicvf_get_regs_len,
+ .get_regs = nicvf_get_regs,
+ .get_coalesce = nicvf_get_coalesce,
+ .get_ringparam = nicvf_get_ringparam,
+ .get_rxnfc = nicvf_get_rxnfc,
+ .set_rxnfc = nicvf_set_rxnfc,
+ .get_rxfh_key_size = nicvf_get_rxfh_key_size,
+ .get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
+ .get_rxfh = nicvf_get_rxfh,
+ .set_rxfh = nicvf_set_rxfh,
+ .get_channels = nicvf_get_channels,
+ .set_channels = nicvf_set_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+void nicvf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &nicvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
new file mode 100644
index 000000000000..02da802d3288
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -0,0 +1,1331 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/log2.h>
+#include <linux/prefetch.h>
+#include <linux/irq.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nicvf"
+#define DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id nicvf_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nicvf_id_table);
+
+static int debug = 0x00;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug message level bitmap");
+
+static int cpi_alg = CPI_ALG_NONE;
+module_param(cpi_alg, int, S_IRUGO);
+MODULE_PARM_DESC(cpi_alg,
+ "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+
+static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
+ struct sk_buff *skb)
+{
+ if (skb->len <= 64)
+ nic->drv_stats.rx_frames_64++;
+ else if (skb->len <= 127)
+ nic->drv_stats.rx_frames_127++;
+ else if (skb->len <= 255)
+ nic->drv_stats.rx_frames_255++;
+ else if (skb->len <= 511)
+ nic->drv_stats.rx_frames_511++;
+ else if (skb->len <= 1023)
+ nic->drv_stats.rx_frames_1023++;
+ else if (skb->len <= 1518)
+ nic->drv_stats.rx_frames_1518++;
+ else
+ nic->drv_stats.rx_frames_jumbo++;
+}
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
+
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
+}
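Per-queue copies of a Qset register are spaced NIC_Q_NUM_SHIFT (18) address bits apart, so the helpers above only have to add qidx << 18 to the base offset. A worked example with the completion-queue doorbell define from nic_reg.h:

	/* Doorbell of CQ 2:
	 *   NIC_QSET_CQ_0_7_DOOR    == 0x010438
	 *   0x010438 + (2ULL << 18) == 0x090438
	 */
	u64 off = NIC_QSET_CQ_0_7_DOOR + (2ULL << NIC_Q_NUM_SHIFT);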
+
+/* VF -> PF mailbox communication */
+
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+ u64 *msg = (u64 *)mbx;
+
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+ int timeout = NIC_MBOX_MSG_TIMEOUT;
+ int sleep = 10;
+
+ nic->pf_acked = false;
+ nic->pf_nacked = false;
+
+ nicvf_write_to_mbx(nic, mbx);
+
+ /* Wait for the PF to ack the message just sent, bounded by NIC_MBOX_MSG_TIMEOUT */
+ while (!nic->pf_acked) {
+ if (nic->pf_nacked)
+ return -EINVAL;
+ msleep(sleep);
+ if (nic->pf_acked)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "PF didn't ack to mbox msg %d from VF%d\n",
+ (mbx->msg.msg & 0xFF), nic->vf_id);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/* Check whether the VF is able to communicate with the PF and also
+ * get the VNIC number this VF is associated with.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+ int timeout = 5000, sleep = 20;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_READY;
+
+ nic->pf_ready_to_rcv_msg = false;
+
+ nicvf_write_to_mbx(nic, &mbx);
+
+ while (!nic->pf_ready_to_rcv_msg) {
+ msleep(sleep);
+ if (nic->pf_ready_to_rcv_msg)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to READY msg\n");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
+{
+ if (bgx->rx)
+ nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
+ else
+ nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ int i;
+
+ mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nicvf_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic->pf_ready_to_rcv_msg = true;
+ nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+ nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+ nic->node = mbx.nic_cfg.node_id;
+ ether_addr_copy(nic->netdev->dev_addr, mbx.nic_cfg.mac_addr);
+ nic->link_up = false;
+ nic->duplex = 0;
+ nic->speed = 0;
+ break;
+ case NIC_MBOX_MSG_ACK:
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_NACK:
+ nic->pf_nacked = true;
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
+ nic->pf_acked = true;
+ nic->bgx_stats_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ nic->pf_acked = true;
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ if (nic->link_up) {
+ netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
+ nic->netdev->name, nic->speed,
+ nic->duplex == DUPLEX_FULL ?
+ "Full duplex" : "Half duplex");
+ netif_carrier_on(nic->netdev);
+ netif_tx_wake_all_queues(nic->netdev);
+ } else {
+ netdev_info(nic->netdev, "%s: Link is Down\n",
+ nic->netdev->name);
+ netif_carrier_off(nic->netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+ }
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+ break;
+ }
+ nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
+}
+
+static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
+{
+ union nic_mbx mbx = {};
+
+ mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+ mbx.mac.vf_id = nic->vf_id;
+ ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_config_cpi(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
+ mbx.cpi_cfg.vf_id = nic->vf_id;
+ mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+ mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_rss_size(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.vf_id = nic->vf_id;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+void nicvf_config_rss(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int ind_tbl_len = rss->rss_size;
+ int i, nextq = 0;
+
+ mbx.rss_cfg.vf_id = nic->vf_id;
+ mbx.rss_cfg.hash_bits = rss->hash_bits;
+ while (ind_tbl_len) {
+ mbx.rss_cfg.tbl_offset = nextq;
+ mbx.rss_cfg.tbl_len = min(ind_tbl_len,
+ RSS_IND_TBL_LEN_PER_MBX_MSG);
+ mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
+ NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+
+ for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
+ mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ ind_tbl_len -= mbx.rss_cfg.tbl_len;
+ }
+}
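The indirection table is streamed to the PF in chunks of RSS_IND_TBL_LEN_PER_MBX_MSG entries: the first mailbox message is tagged RSS_CFG and every continuation RSS_CFG_CONT. Worked numbers, assuming (for illustration only) a 128-entry table and 8 entries per message:

	/* 128 / 8 == 16 mailbox messages:
	 *   msg  1: tbl_offset   0, msg = NIC_MBOX_MSG_RSS_CFG
	 *   msg  2: tbl_offset   8, msg = NIC_MBOX_MSG_RSS_CFG_CONT
	 *   ...
	 *   msg 16: tbl_offset 120, msg = NIC_MBOX_MSG_RSS_CFG_CONT
	 */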
+
+void nicvf_set_rss_key(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
+ int idx;
+
+ for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+ nicvf_reg_write(nic, key_addr, rss->key[idx]);
+ key_addr += sizeof(u64);
+ }
+}
+
+static int nicvf_rss_init(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ nicvf_get_rss_size(nic);
+
+ if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+ rss->enable = false;
+ rss->hash_bits = 0;
+ return 0;
+ }
+
+ rss->enable = true;
+
+ /* Using the HW reset value for now */
+ rss->key[0] = 0xFEED0BADFEED0BADULL;
+ rss->key[1] = 0xFEED0BADFEED0BADULL;
+ rss->key[2] = 0xFEED0BADFEED0BADULL;
+ rss->key[3] = 0xFEED0BADFEED0BADULL;
+ rss->key[4] = 0xFEED0BADFEED0BADULL;
+
+ nicvf_set_rss_key(nic);
+
+ rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
+
+ rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));
+
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
+ nic->qs->rq_cnt);
+ nicvf_config_rss(nic);
+ return 1;
+}
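rss->hash_bits is simply log2 of the (power-of-two) indirection table size, and the default table spreads the receive queues round-robin via ethtool_rxfh_indir_default(). Worked numbers, assuming a hypothetical rss_size of 128 and rq_cnt of 4:

	/* ilog2(rounddown_pow_of_two(128)) == 7, so hash_bits == 7.
	 * ethtool_rxfh_indir_default(idx, 4) == idx % 4, giving an
	 * indirection table of 0, 1, 2, 3, 0, 1, 2, 3, ... over all
	 * 128 entries.
	 */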
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues)
+{
+ int err = 0;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set no of Tx queues: %d\n", tx_queues);
+ return err;
+ }
+
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev,
+ "Failed to set no of Rx queues: %d\n", rx_queues);
+ return err;
+}
+
+static int nicvf_init_resources(struct nicvf *nic)
+{
+ int err;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+
+ /* Enable Qset */
+ nicvf_qset_config(nic, true);
+
+ /* Initialize queues and HW for data transfer */
+ err = nicvf_config_data_transfer(nic, true);
+ if (err) {
+ netdev_err(nic->netdev,
+ "Failed to alloc/config VF's QSet resources\n");
+ return err;
+ }
+
+ /* Send VF config done msg to PF */
+ nicvf_write_to_mbx(nic, &mbx);
+
+ return 0;
+}
+
+static void nicvf_snd_pkt_handler(struct net_device *netdev,
+ struct cmp_queue *cq,
+ struct cqe_send_t *cqe_tx, int cqe_type)
+{
+ struct sk_buff *skb = NULL;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct snd_queue *sq;
+ struct sq_hdr_subdesc *hdr;
+
+ sq = &nic->qs->sq[cqe_tx->sq_idx];
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
+ return;
+
+ netdev_dbg(nic->netdev,
+ "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
+ __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
+ cqe_tx->sqe_ptr, hdr->subdesc_cnt);
+
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+ skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
+ /* For TSO-offloaded packets, only the single head SKB needs to be freed */
+ if (skb) {
+ prefetch(skb);
+ dev_consume_skb_any(skb);
+ }
+}
+
+static void nicvf_rcv_pkt_handler(struct net_device *netdev,
+ struct napi_struct *napi,
+ struct cmp_queue *cq,
+ struct cqe_rx_t *cqe_rx, int cqe_type)
+{
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+ int err = 0;
+
+ /* Check for errors */
+ err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+ if (err && !cqe_rx->rb_cnt)
+ return;
+
+ skb = nicvf_get_rcv_skb(nic, cqe_rx);
+ if (!skb) {
+ netdev_dbg(nic->netdev, "Packet not received\n");
+ return;
+ }
+
+ if (netif_msg_pktdata(nic)) {
+ netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
+ skb, skb->len);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, true);
+ }
+
+ nicvf_set_rx_frame_cnt(nic, skb);
+
+ skb_record_rx_queue(skb, cqe_rx->rq_idx);
+ if (netdev->hw_features & NETIF_F_RXCSUM) {
+ /* HW by default verifies TCP/UDP/SCTP checksums */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (napi && (netdev->features & NETIF_F_GRO))
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
+}
+
+static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
+ struct napi_struct *napi, int budget)
+{
+ int processed_cqe, work_done = 0;
+ int cqe_count, cqe_head;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct cmp_queue *cq = &qs->cq[cq_idx];
+ struct cqe_rx_t *cq_desc;
+
+ spin_lock_bh(&cq->lock);
+loop:
+ processed_cqe = 0;
+ /* Get the number of valid CQ entries to process */
+ cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
+ cqe_count &= CQ_CQE_COUNT;
+ if (!cqe_count)
+ goto done;
+
+ /* Get head of the valid CQ entries */
+ cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
+ cqe_head &= 0xFFFF;
+
+ netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
+ __func__, cqe_count, cqe_head);
+ while (processed_cqe < cqe_count) {
+ /* Get the CQ descriptor */
+ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
+ cqe_head++;
+ cqe_head &= (cq->dmem.q_len - 1);
+ /* Initiate prefetch for next descriptor */
+ prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
+
+ if ((work_done >= budget) && napi &&
+ (cq_desc->cqe_type != CQE_TYPE_SEND)) {
+ break;
+ }
+
+ netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
+ cq_desc->cqe_type);
+ switch (cq_desc->cqe_type) {
+ case CQE_TYPE_RX:
+ nicvf_rcv_pkt_handler(netdev, napi, cq,
+ cq_desc, CQE_TYPE_RX);
+ work_done++;
+ break;
+ case CQE_TYPE_SEND:
+ nicvf_snd_pkt_handler(netdev, cq,
+ (void *)cq_desc, CQE_TYPE_SEND);
+ break;
+ case CQE_TYPE_INVALID:
+ case CQE_TYPE_RX_SPLIT:
+ case CQE_TYPE_RX_TCP:
+ case CQE_TYPE_SEND_PTP:
+ /* Ignore for now */
+ break;
+ }
+ processed_cqe++;
+ }
+ netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
+ __func__, processed_cqe, work_done, budget);
+
+ /* Ring doorbell to inform H/W to reuse processed CQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
+ cq_idx, processed_cqe);
+
+ if ((work_done < budget) && napi)
+ goto loop;
+
+done:
+ spin_unlock_bh(&cq->lock);
+ return work_done;
+}
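The handler relies on the CQ length being a power of two, so the head index wraps with a mask instead of a modulo. A minimal sketch of the wrap (the q_len value is illustrative):

	/* With q_len == 1024 (a power of two):
	 *   cqe_head = 1023;
	 *   cqe_head++;
	 *   cqe_head &= (1024 - 1);	/* wraps to 0 */
	 */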
+
+static int nicvf_poll(struct napi_struct *napi, int budget)
+{
+ u64 cq_head;
+ int work_done = 0;
+ struct net_device *netdev = napi->dev;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_cq_poll *cq;
+ struct netdev_queue *txq;
+
+ cq = container_of(napi, struct nicvf_cq_poll, napi);
+ work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
+
+ txq = netdev_get_tx_queue(netdev, cq->cq_idx);
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+
+ if (work_done < budget) {
+ /* Slow packet rate, exit polling */
+ napi_complete(napi);
+ /* Re-enable interrupts */
+ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx, cq_head);
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ }
+ return work_done;
+}
+
+/* Qset error interrupt handler
+ *
+ * As of now only CQ errors are handled
+ */
+static void nicvf_handle_qs_err(unsigned long data)
+{
+ struct nicvf *nic = (struct nicvf *)data;
+ struct queue_set *qs = nic->qs;
+ int qidx;
+ u64 status;
+
+ netif_tx_disable(nic->netdev);
+
+ /* Check if it is CQ err */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
+ qidx);
+ if (!(status & CQ_ERR_MASK))
+ continue;
+ /* Process already queued CQEs and reconfig CQ */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_sq_disable(nic, qidx);
+ nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
+ nicvf_cmp_queue_config(nic, qs, qidx, true);
+ nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
+ nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
+
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+ }
+
+ netif_tx_start_all_queues(nic->netdev);
+ /* Re-enable Qset error interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+}
+
+static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
+{
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ u64 intr;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ /* Check for spurious interrupt */
+ if (!(intr & NICVF_INTR_MBOX_MASK))
+ return IRQ_HANDLED;
+
+ nicvf_handle_mbx_intr(nic);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+{
+ u64 qidx, intr, clear_intr = 0;
+ u64 cq_intr, rbdr_intr, qs_err_intr;
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ if (netif_msg_intr(nic))
+ netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+ nic->netdev->name, intr);
+
+ qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
+ if (qs_err_intr) {
+ /* Disable Qset err interrupt and schedule softirq */
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ tasklet_hi_schedule(&nic->qs_err_task);
+ clear_intr |= qs_err_intr;
+ }
+
+ /* Disable interrupts and start polling */
+ cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (!(cq_intr & (1 << qidx)))
+ continue;
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
+ continue;
+
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
+
+ cq_poll = nic->napi[qidx];
+ /* Schedule NAPI */
+ if (cq_poll)
+ napi_schedule(&cq_poll->napi);
+ }
+
+ /* Handle RBDR interrupts */
+ rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
+ if (rbdr_intr) {
+ /* Disable RBDR interrupt and schedule softirq */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
+ continue;
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ tasklet_hi_schedule(&nic->rbdr_task);
+ clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
+ }
+ }
+
+ /* Clear interrupts */
+ nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
+ return IRQ_HANDLED;
+}
+
+static int nicvf_enable_msix(struct nicvf *nic)
+{
+ int ret, vec;
+
+ nic->num_vec = NIC_VF_MSIX_VECTORS;
+
+ for (vec = 0; vec < nic->num_vec; vec++)
+ nic->msix_entries[vec].entry = vec;
+
+ ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+ if (ret) {
+ netdev_err(nic->netdev,
+ "Req for #%d msix vectors failed\n", nic->num_vec);
+ return 0;
+ }
+ nic->msix_enabled = 1;
+ return 1;
+}
+
+static void nicvf_disable_msix(struct nicvf *nic)
+{
+ if (nic->msix_enabled) {
+ pci_disable_msix(nic->pdev);
+ nic->msix_enabled = 0;
+ nic->num_vec = 0;
+ }
+}
+
+static int nicvf_register_interrupts(struct nicvf *nic)
+{
+ int irq, free, ret = 0;
+ int vector;
+
+ for_each_cq_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
+ nic->vf_id, irq);
+
+ for_each_sq_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
+ nic->vf_id, irq - NICVF_INTR_ID_SQ);
+
+ for_each_rbdr_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
+ nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+
+ /* Register all interrupts except mailbox */
+ for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+ vector = nic->msix_entries[irq].vector;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ break;
+ nic->irq_allocated[irq] = true;
+ }
+
+ for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
+ vector = nic->msix_entries[irq].vector;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ break;
+ nic->irq_allocated[irq] = true;
+ }
+
+ sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
+ "NICVF%d Qset error", nic->vf_id);
+ if (!ret) {
+ vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
+ irq = NICVF_INTR_ID_QS_ERR;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (!ret)
+ nic->irq_allocated[irq] = true;
+ }
+
+ if (ret) {
+ netdev_err(nic->netdev, "Request irq failed\n");
+ for (free = 0; free < irq; free++)
+ free_irq(nic->msix_entries[free].vector, nic);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nicvf_unregister_interrupts(struct nicvf *nic)
+{
+ int irq;
+
+ /* Free registered interrupts */
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (nic->irq_allocated[irq])
+ free_irq(nic->msix_entries[irq].vector, nic);
+ nic->irq_allocated[irq] = false;
+ }
+
+ /* Disable MSI-X */
+ nicvf_disable_msix(nic);
+}
+
+/* Initialize MSI-X vectors and register the MISC interrupt.
+ * Send a READY message to the PF to check if it's alive.
+ */
+static int nicvf_register_misc_interrupt(struct nicvf *nic)
+{
+ int ret = 0;
+ int irq = NICVF_INTR_ID_MISC;
+
+ /* Return if mailbox interrupt is already registered */
+ if (nic->msix_enabled)
+ return 0;
+
+ /* Enable MSI-X */
+ if (!nicvf_enable_msix(nic))
+ return 1;
+
+ sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
+ /* Register Misc interrupt */
+ ret = request_irq(nic->msix_entries[irq].vector,
+ nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
+
+ if (ret)
+ return ret;
+ nic->irq_allocated[irq] = true;
+
+ /* Enable mailbox interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ /* Check if VF is able to communicate with PF */
+ if (!nicvf_check_pf_ready(nic)) {
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+ return 1;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int qid = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
+
+ /* Check for minimum packet length */
+ if (skb->len <= ETH_HLEN) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+ nic->drv_stats.tx_busy++;
+ if (netif_msg_tx_err(nic))
+ netdev_warn(netdev,
+ "%s: Transmit ring full, stopping SQ%d\n",
+ netdev->name, qid);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+int nicvf_stop(struct net_device *netdev)
+{
+ int irq, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ /* Disable RBDR & QS error interrupts */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+ }
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Wait for pending IRQ handlers to finish */
+ for (irq = 0; irq < nic->num_vec; irq++)
+ synchronize_irq(nic->msix_entries[irq].vector);
+
+ tasklet_kill(&nic->rbdr_task);
+ tasklet_kill(&nic->qs_err_task);
+ if (nic->rb_work_scheduled)
+ cancel_delayed_work_sync(&nic->rbdr_work);
+
+ for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ nic->napi[qidx] = NULL;
+ napi_synchronize(&cq_poll->napi);
+ /* CQ intr is re-enabled on napi_complete(),
+ * so disable it now
+ */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ kfree(cq_poll);
+ }
+
+ /* Free resources */
+ nicvf_config_data_transfer(nic, false);
+
+ /* Disable HW Qset */
+ nicvf_qset_config(nic, false);
+
+ /* disable mailbox interrupt */
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ nicvf_unregister_interrupts(nic);
+
+ return 0;
+}
+
+int nicvf_open(struct net_device *netdev)
+{
+ int err, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+
+ nic->mtu = netdev->mtu;
+
+ netif_carrier_off(netdev);
+
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ return err;
+
+ /* Register NAPI handler for processing CQEs */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
+ if (!cq_poll) {
+ err = -ENOMEM;
+ goto napi_del;
+ }
+ cq_poll->cq_idx = qidx;
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&cq_poll->napi);
+ nic->napi[qidx] = cq_poll;
+ }
+
+ /* If the PF did not provide a MAC address, generate a random one */
+ if (is_zero_ether_addr(netdev->dev_addr)) {
+ eth_hw_addr_random(netdev);
+ nicvf_hw_set_mac_addr(nic, netdev);
+ }
+
+ /* Init tasklet for handling Qset err interrupt */
+ tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
+ (unsigned long)nic);
+
+ /* Init RBDR tasklet which will refill RBDR */
+ tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
+ (unsigned long)nic);
+ INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
+
+ /* Configure CPI algorithm */
+ nic->cpi_alg = cpi_alg;
+ nicvf_config_cpi(nic);
+
+ /* Configure receive side scaling */
+ nicvf_rss_init(nic);
+
+ err = nicvf_register_interrupts(nic);
+ if (err)
+ goto cleanup;
+
+ /* Initialize the queues */
+ err = nicvf_init_resources(nic);
+ if (err)
+ goto cleanup;
+
+ /* Make sure queue initialization is written */
+ wmb();
+
+ /* Clear any pending interrupts */
+ nicvf_reg_write(nic, NIC_VF_INT, -1);
+ /* Enable Qset err interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Enable completion queue interrupt */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+
+ /* Enable RBDR threshold interrupt */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+cleanup:
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+napi_del:
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ kfree(cq_poll);
+ nic->napi[qidx] = NULL;
+ }
+ return err;
+}
+
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+ union nic_mbx mbx = {};
+
+ mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (new_mtu > NIC_HW_MAX_FRS)
+ return -EINVAL;
+
+ if (new_mtu < NIC_HW_MIN_FRS)
+ return -EINVAL;
+
+ if (nicvf_update_hw_max_frs(nic, new_mtu))
+ return -EINVAL;
+ netdev->mtu = new_mtu;
+ nic->mtu = new_mtu;
+
+ return 0;
+}
+
+static int nicvf_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ if (nic->msix_enabled)
+ if (nicvf_hw_set_mac_addr(nic, netdev))
+ return -EBUSY;
+
+ return 0;
+}
+
+void nicvf_update_lmac_stats(struct nicvf *nic)
+{
+ int stat = 0;
+ union nic_mbx mbx = {};
+ int timeout;
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = nic->vf_id;
+ /* Rx stats */
+ mbx.bgx_stats.rx = 1;
+ while (stat < BGX_RX_STATS_COUNT) {
+ nic->bgx_stats_acked = 0;
+ mbx.bgx_stats.idx = stat;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ timeout = 0;
+ while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+ msleep(2);
+ timeout++;
+ }
+ stat++;
+ }
+
+ stat = 0;
+
+ /* Tx stats */
+ mbx.bgx_stats.rx = 0;
+ while (stat < BGX_TX_STATS_COUNT) {
+ nic->bgx_stats_acked = 0;
+ mbx.bgx_stats.idx = stat;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ timeout = 0;
+ while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+ msleep(2);
+ timeout++;
+ }
+ stat++;
+ }
+}
+
+void nicvf_update_stats(struct nicvf *nic)
+{
+ int qidx;
+ struct nicvf_hw_stats *stats = &nic->stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+ struct queue_set *qs = nic->qs;
+
+#define GET_RX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define GET_TX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
+
+ stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
+ stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
+ stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
+ stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+ stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
+ stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
+ stats->rx_drop_red = GET_RX_STATS(RX_RED);
+ stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+ stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
+ stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
+ stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
+ stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
+
+ stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+ stats->tx_drops = GET_TX_STATS(TX_DROP);
+
+ drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
+ stats->rx_bcast_frames_ok +
+ stats->rx_mcast_frames_ok;
+ drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
+ stats->tx_bcast_frames_ok +
+ stats->tx_mcast_frames_ok;
+ drv_stats->rx_drops = stats->rx_drop_red +
+ stats->rx_drop_overrun;
+ drv_stats->tx_drops = stats->tx_drops;
+
+ /* Update RQ and SQ stats */
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_update_rq_stats(nic, qidx);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_update_sq_stats(nic, qidx);
+}
+
+static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_hw_stats *hw_stats = &nic->stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+
+ nicvf_update_stats(nic);
+
+ stats->rx_bytes = hw_stats->rx_bytes_ok;
+ stats->rx_packets = drv_stats->rx_frames_ok;
+ stats->rx_dropped = drv_stats->rx_drops;
+
+ stats->tx_bytes = hw_stats->tx_bytes_ok;
+ stats->tx_packets = drv_stats->tx_frames_ok;
+ stats->tx_dropped = drv_stats->tx_drops;
+
+ return stats;
+}
+
+static void nicvf_tx_timeout(struct net_device *dev)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ if (netif_msg_tx_err(nic))
+ netdev_warn(dev, "%s: Transmit timed out, resetting\n",
+ dev->name);
+
+ schedule_work(&nic->reset_task);
+}
+
+static void nicvf_reset_task(struct work_struct *work)
+{
+ struct nicvf *nic;
+
+ nic = container_of(work, struct nicvf, reset_task);
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ nicvf_stop(nic->netdev);
+ nicvf_open(nic->netdev);
+ nic->netdev->trans_start = jiffies;
+}
+
+static const struct net_device_ops nicvf_netdev_ops = {
+ .ndo_open = nicvf_open,
+ .ndo_stop = nicvf_stop,
+ .ndo_start_xmit = nicvf_xmit,
+ .ndo_change_mtu = nicvf_change_mtu,
+ .ndo_set_mac_address = nicvf_set_mac_address,
+ .ndo_get_stats64 = nicvf_get_stats64,
+ .ndo_tx_timeout = nicvf_tx_timeout,
+};
+
+static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct nicvf *nic;
+ struct queue_set *qs;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
+ MAX_RCV_QUEUES_PER_QS,
+ MAX_SND_QUEUES_PER_QS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ nic = netdev_priv(netdev);
+ nic->netdev = netdev;
+ nic->pdev = pdev;
+
+ /* MAP VF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = nicvf_set_qset_resources(nic);
+ if (err)
+ goto err_free_netdev;
+
+ qs = nic->qs;
+
+ err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
+ if (err)
+ goto err_free_netdev;
+
+ /* Check if PF is alive and get MAC address for this VF */
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ goto err_free_netdev;
+
+ netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO);
+ netdev->hw_features = netdev->features;
+
+ netdev->netdev_ops = &nicvf_netdev_ops;
+
+ INIT_WORK(&nic->reset_task, nicvf_reset_task);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_unregister_interrupts;
+ }
+
+ nic->msg_enable = debug;
+
+ nicvf_set_ethtool_ops(netdev);
+
+ return 0;
+
+err_unregister_interrupts:
+ nicvf_unregister_interrupts(nic);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void nicvf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nicvf *nic = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ nicvf_unregister_interrupts(nic);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver nicvf_driver = {
+ .name = DRV_NAME,
+ .id_table = nicvf_id_table,
+ .probe = nicvf_probe,
+ .remove = nicvf_remove,
+};
+
+static int __init nicvf_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&nicvf_driver);
+}
+
+static void __exit nicvf_cleanup_module(void)
+{
+ pci_unregister_driver(&nicvf_driver);
+}
+
+module_init(nicvf_init_module);
+module_exit(nicvf_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
new file mode 100644
index 000000000000..d69d228d11a0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -0,0 +1,1545 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "nicvf_queues.h"
+
+struct rbuf_info {
+ struct page *page;
+ void *data;
+ u64 offset;
+};
+
+#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
+
+/* Poll a register for a specific value */
+static int nicvf_poll_reg(struct nicvf *nic, int qidx,
+ u64 reg, int bit_pos, int bits, int val)
+{
+ u64 bit_mask;
+ u64 reg_val;
+ int timeout = 10;
+
+ bit_mask = (1ULL << bits) - 1;
+ bit_mask = (bit_mask << bit_pos);
+
+ while (timeout) {
+ reg_val = nicvf_queue_reg_read(nic, reg, qidx);
+ if (((reg_val & bit_mask) >> bit_pos) == val)
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
+ return 1;
+}
+
+/* Allocate memory for a queue's descriptors */
+static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
+ int q_len, int desc_size, int align_bytes)
+{
+ dmem->q_len = q_len;
+ dmem->size = (desc_size * q_len) + align_bytes;
+ /* Save address, need it while freeing */
+ dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+ &dmem->dma, GFP_KERNEL);
+ if (!dmem->unalign_base)
+ return -ENOMEM;
+
+ /* Align memory address for 'align_bytes' */
+ dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
+ dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
+ return 0;
+}
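The allocation is over-sized by align_bytes, the DMA address is rounded up, and the CPU pointer is advanced by the same delta so both views stay in step. Worked numbers (hypothetical addresses, align_bytes = 512, assuming NICVF_ALIGNED_ADDR rounds up to the given alignment):

	/* dma       == 0x10000040
	 * phys_base == NICVF_ALIGNED_ADDR(0x10000040, 512) == 0x10000200
	 * base      == unalign_base + (0x10000200 - 0x10000040)
	 *           == unalign_base + 0x1C0
	 */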
+
+/* Free queue's descriptor memory */
+static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
+{
+ if (!dmem)
+ return;
+
+ dma_free_coherent(&nic->pdev->dev, dmem->size,
+ dmem->unalign_base, dmem->dma);
+ dmem->unalign_base = NULL;
+ dmem->base = NULL;
+}
+
+/* Allocate a buffer for packet reception.
+ * The HW returns the memory address to which the packet was DMA'ed, not a
+ * pointer into the RBDR ring, so save the buffer's start address at the
+ * head of the fragment and align the start address to a cache-line
+ * boundary.
+ */
+static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
+ u32 buf_len, u64 **rbuf)
+{
+ u64 data;
+ struct rbuf_info *rinfo;
+ int order = get_order(buf_len);
+
+ /* Check if the request can be accommodated in the previously allocated page */
+ if (nic->rb_page) {
+ if ((nic->rb_page_offset + buf_len + buf_len) >
+ (PAGE_SIZE << order)) {
+ nic->rb_page = NULL;
+ } else {
+ nic->rb_page_offset += buf_len;
+ get_page(nic->rb_page);
+ }
+ }
+
+ /* Allocate a new page */
+ if (!nic->rb_page) {
+ nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
+ if (!nic->rb_page) {
+ netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
+ return -ENOMEM;
+ }
+ nic->rb_page_offset = 0;
+ }
+
+ data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
+
+ /* Align buffer address to the cache line size, i.e. 128 bytes */
+ rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
+ /* Save page address for refcount updates */
+ rinfo->page = nic->rb_page;
+ /* Store start address for later retrieval */
+ rinfo->data = (void *)data;
+ /* Store alignment offset */
+ rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
+
+ data += rinfo->offset;
+
+ /* Give next aligned address to hw for DMA */
+ *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
+ return 0;
+}
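The resulting fragment layout, which GET_RBUF_INFO() depends on when the hardware later hands back only the buffer's DMA address, looks like this (a sketch; the widths are the driver's alignment constants):

	/* page data:
	 * |-- align pad --|-- struct rbuf_info --|-- packet buffer --|
	 *                 ^                      ^
	 *                 rinfo                  *rbuf (given to HW)
	 *
	 * rinfo == GET_RBUF_INFO(rb_ptr) == rb_ptr - NICVF_RCV_BUF_ALIGN_BYTES
	 */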
+
+/* Retrieve actual buffer start address and build skb for received packet */
+static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
+ u64 rb_ptr, int len)
+{
+ struct sk_buff *skb;
+ struct rbuf_info *rinfo;
+
+ rb_ptr = (u64)phys_to_virt(rb_ptr);
+ /* Get buffer start address and alignment offset */
+ rinfo = GET_RBUF_INFO(rb_ptr);
+
+ /* Now build an skb to give to stack */
+ skb = build_skb(rinfo->data, RCV_FRAG_LEN);
+ if (!skb) {
+ put_page(rinfo->page);
+ return NULL;
+ }
+
+ /* Set correct skb->data */
+ skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
+
+ prefetch((void *)rb_ptr);
+ return skb;
+}
+
+/* Allocate RBDR ring and populate receive buffers */
+static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
+ int ring_len, int buf_size)
+{
+ int idx;
+ u64 *rbuf;
+ struct rbdr_entry_t *desc;
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
+ sizeof(struct rbdr_entry_t),
+ NICVF_RCV_BUF_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ rbdr->desc = rbdr->dmem.base;
+ /* Buffer size has to be in multiples of 128 bytes */
+ rbdr->dma_size = buf_size;
+ rbdr->enable = true;
+ rbdr->thresh = RBDR_THRESH;
+
+ nic->rb_page = NULL;
+ for (idx = 0; idx < ring_len; idx++) {
+ err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
+ &rbuf);
+ if (err)
+ return err;
+
+ desc = GET_RBDR_DESC(rbdr, idx);
+ desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ }
+ return 0;
+}
+
+/* Free RBDR ring and its receive buffers */
+static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+{
+ int head, tail;
+ u64 buf_addr;
+ struct rbdr_entry_t *desc;
+ struct rbuf_info *rinfo;
+
+ if (!rbdr)
+ return;
+
+ rbdr->enable = false;
+ if (!rbdr->dmem.base)
+ return;
+
+ head = rbdr->head;
+ tail = rbdr->tail;
+
+ /* Release the receive buffer pages */
+ while (head != tail) {
+ desc = GET_RBDR_DESC(rbdr, head);
+ buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+ rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+ put_page(rinfo->page);
+ head++;
+ head &= (rbdr->dmem.q_len - 1);
+ }
+ /* Release the buffer page of the tail desc */
+ desc = GET_RBDR_DESC(rbdr, tail);
+ buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+ rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+ put_page(rinfo->page);
+
+ /* Free RBDR ring */
+ nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+}
+
+/* Refill receive buffer descriptors with new buffers */
+static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
+{
+ struct queue_set *qs = nic->qs;
+ int rbdr_idx = qs->rbdr_cnt;
+ int tail, qcount;
+ int refill_rb_cnt;
+ struct rbdr *rbdr;
+ struct rbdr_entry_t *desc;
+ u64 *rbuf;
+ int new_rb = 0;
+
+refill:
+ if (!rbdr_idx)
+ return;
+ rbdr_idx--;
+ rbdr = &qs->rbdr[rbdr_idx];
+ /* Check if it's enabled */
+ if (!rbdr->enable)
+ goto next_rbdr;
+
+ /* Get the number of descriptors to be refilled */
+ qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
+ qcount &= 0x7FFFF;
+ /* Doorbell can be rung with at most ring size minus 1 */
+ if (qcount >= (qs->rbdr_len - 1))
+ goto next_rbdr;
+ refill_rb_cnt = qs->rbdr_len - qcount - 1;
+
+ /* Start filling descs from tail */
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
+ while (refill_rb_cnt) {
+ tail++;
+ tail &= (rbdr->dmem.q_len - 1);
+
+ if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
+ break;
+
+ desc = GET_RBDR_DESC(rbdr, tail);
+ desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ refill_rb_cnt--;
+ new_rb++;
+ }
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Check if buffer allocation failed */
+ if (refill_rb_cnt)
+ nic->rb_alloc_fail = true;
+ else
+ nic->rb_alloc_fail = false;
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ rbdr_idx, new_rb);
+next_rbdr:
+ /* Re-enable RBDR interrupts only if buffer allocation succeeded */
+ if (!nic->rb_alloc_fail && rbdr->enable)
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
+
+ if (rbdr_idx)
+ goto refill;
+}
+
+/* Allocate receive buffers in non-atomic context for a better chance of success */
+void nicvf_rbdr_work(struct work_struct *work)
+{
+ struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
+
+ nicvf_refill_rbdr(nic, GFP_KERNEL);
+ if (nic->rb_alloc_fail)
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ else
+ nic->rb_work_scheduled = false;
+}
+
+/* In softirq context, allocate receive buffers in atomic mode */
+void nicvf_rbdr_task(unsigned long data)
+{
+ struct nicvf *nic = (struct nicvf *)data;
+
+ nicvf_refill_rbdr(nic, GFP_ATOMIC);
+ if (nic->rb_alloc_fail) {
+ nic->rb_work_scheduled = true;
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ }
+}
+
+/* Initialize completion queue */
+static int nicvf_init_cmp_queue(struct nicvf *nic,
+ struct cmp_queue *cq, int q_len)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
+ NICVF_CQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ cq->desc = cq->dmem.base;
+ cq->thresh = CMP_QUEUE_CQE_THRESH;
+ /* One timer tick is 50ns, so threshold / 20 converts ticks to usecs */
+ nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH / 20) - 1;
+
+ return 0;
+}
+
+static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
+{
+ if (!cq)
+ return;
+ if (!cq->dmem.base)
+ return;
+
+ nicvf_free_q_desc_mem(nic, &cq->dmem);
+}
+
+/* Initialize transmit queue */
+static int nicvf_init_snd_queue(struct nicvf *nic,
+ struct snd_queue *sq, int q_len)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
+ NICVF_SQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ sq->desc = sq->dmem.base;
+ sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+ if (!sq->skbuff)
+ return -ENOMEM;
+ sq->head = 0;
+ sq->tail = 0;
+ atomic_set(&sq->free_cnt, q_len - 1);
+ sq->thresh = SND_QUEUE_THRESH;
+
+ /* Preallocate memory for TSO segment's header */
+ sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
+ q_len * TSO_HEADER_SIZE,
+ &sq->tso_hdrs_phys, GFP_KERNEL);
+ if (!sq->tso_hdrs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+ if (!sq)
+ return;
+ if (!sq->dmem.base)
+ return;
+
+ if (sq->tso_hdrs)
+ dma_free_coherent(&nic->pdev->dev,
+ sq->dmem.q_len * TSO_HEADER_SIZE,
+ sq->tso_hdrs, sq->tso_hdrs_phys);
+
+ kfree(sq->skbuff);
+ nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+ /* Check if SQ is stopped */
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+ return;
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ union nic_mbx mbx = {};
+
+ /* Make sure all packets in the pipeline are written back into mem */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable timer threshold (doesn't get reset upon CQ reset) */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ /* Disable completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+ struct rbdr *rbdr, int qidx)
+{
+ u64 tmp, fifo_state;
+ int timeout = 10;
+
+ /* Save head and tail pointers for freeing up buffers */
+ rbdr->head = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_HEAD,
+ qidx) >> 3;
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL,
+ qidx) >> 3;
+
+ /* If RBDR FIFO is in 'FAIL' state then do a reset first
+ * before reclaiming.
+ */
+ fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+ if (((fifo_state >> 62) & 0x03) == 0x3)
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ /* Disable RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+ while (1) {
+ tmp = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+ qidx);
+ if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "Failed polling on prefetch status\n");
+ return;
+ }
+ }
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+ return;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+}
+
+/* Configures receive queue */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct rcv_queue *rq;
+ struct rq_cfg rq_cfg;
+
+ rq = &qs->rq[qidx];
+ rq->enable = enable;
+
+ /* Disable receive queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+ if (!rq->enable) {
+ nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ return;
+ }
+
+ rq->cq_qs = qs->vnic_id;
+ rq->cq_idx = qidx;
+ rq->start_rbdr_qs = qs->vnic_id;
+ rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ rq->cont_rbdr_qs = qs->vnic_id;
+ rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ /* All writes of RBDR data to be loaded into L2 cache as well */
+ rq->caching = 1;
+
+ /* Send a mailbox msg to PF to config RQ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+ mbx.rq.qs_num = qs->vnic_id;
+ mbx.rq.rq_num = qidx;
+ mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+ (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+ (rq->cont_qs_rbdr_idx << 8) |
+ (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+ mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* RQ drop config
+ * Enable CQ drop to reserve sufficient CQEs for all tx packets
+ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+ mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+
+ /* Enable Receive queue */
+ rq_cfg.ena = 1;
+ rq_cfg.tcp_ena = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
+}
+
+/* Configures completion queue */
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct cmp_queue *cq;
+ struct cq_cfg cq_cfg;
+
+ cq = &qs->cq[qidx];
+ cq->enable = enable;
+
+ if (!cq->enable) {
+ nicvf_reclaim_cmp_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+
+ spin_lock_init(&cq->lock);
+ /* Set completion queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
+ qidx, (u64)(cq->dmem.phys_base));
+
+ /* Enable Completion queue */
+ cq_cfg.ena = 1;
+ cq_cfg.reset = 0;
+ cq_cfg.caching = 0;
+ cq_cfg.qsize = CMP_QSIZE;
+ cq_cfg.avg_con = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
+ qidx, nic->cq_coalesce_usecs);
+}
+
+/* Configures transmit queue */
+static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct snd_queue *sq;
+ struct sq_cfg sq_cfg;
+
+ sq = &qs->sq[qidx];
+ sq->enable = enable;
+
+ if (!sq->enable) {
+ nicvf_reclaim_snd_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+
+ sq->cq_qs = qs->vnic_id;
+ sq->cq_idx = qidx;
+
+ /* Send a mailbox msg to PF to config SQ */
+ mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
+ mbx.sq.qs_num = qs->vnic_id;
+ mbx.sq.sq_num = qidx;
+ mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* Set queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
+ qidx, (u64)(sq->dmem.phys_base));
+
+ /* Enable send queue & set queue size */
+ sq_cfg.ena = 1;
+ sq_cfg.reset = 0;
+ sq_cfg.ldwb = 0;
+ sq_cfg.qsize = SND_QSIZE;
+ sq_cfg.tstmp_bgx_intf = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
+
+ /* Set queue:cpu affinity for better load distribution */
+ if (cpu_online(qidx)) {
+ cpumask_set_cpu(qidx, &sq->affinity_mask);
+ netif_set_xps_queue(nic->netdev,
+ &sq->affinity_mask, qidx);
+ }
+}
+
+/* Configures receive buffer descriptor ring */
+static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct rbdr *rbdr;
+ struct rbdr_cfg rbdr_cfg;
+
+ rbdr = &qs->rbdr[qidx];
+ nicvf_reclaim_rbdr(nic, rbdr, qidx);
+ if (!enable)
+ return;
+
+ /* Set descriptor base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
+ qidx, (u64)(rbdr->dmem.phys_base));
+
+ /* Enable RBDR & set queue size */
+ /* Buffer size should be in multiples of 128 bytes */
+ rbdr_cfg.ena = 1;
+ rbdr_cfg.reset = 0;
+ rbdr_cfg.ldwb = 0;
+ rbdr_cfg.qsize = RBDR_SIZE;
+ rbdr_cfg.avg_con = 0;
+ rbdr_cfg.lines = rbdr->dma_size / 128;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, *(u64 *)&rbdr_cfg);
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ qidx, qs->rbdr_len - 1);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
+ qidx, rbdr->thresh - 1);
+}
+
+/* Requests PF to assign and enable Qset */
+void nicvf_qset_config(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct queue_set *qs = nic->qs;
+ struct qs_cfg *qs_cfg;
+
+ if (!qs) {
+ netdev_warn(nic->netdev,
+ "Qset is still not allocated, don't init queues\n");
+ return;
+ }
+
+ qs->enable = enable;
+ qs->vnic_id = nic->vf_id;
+
+ /* Send a mailbox msg to PF to config Qset */
+ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
+ mbx.qs.num = qs->vnic_id;
+
+ mbx.qs.cfg = 0;
+ qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
+ if (qs->enable) {
+ qs_cfg->ena = 1;
+#ifdef __BIG_ENDIAN
+ qs_cfg->be = 1;
+#endif
+ qs_cfg->vnic = qs->vnic_id;
+ }
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_free_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Free receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
+
+ /* Free completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
+
+ /* Free send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_free_snd_queue(nic, &qs->sq[qidx]);
+}
+
+static int nicvf_alloc_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Alloc receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
+ DMA_BUFFER_LEN))
+ goto alloc_fail;
+ }
+
+ /* Alloc send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
+ if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+ goto alloc_fail;
+ }
+
+ /* Alloc completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
+ goto alloc_fail;
+ }
+
+ return 0;
+alloc_fail:
+ nicvf_free_resources(nic);
+ return -ENOMEM;
+}
+
+int nicvf_set_qset_resources(struct nicvf *nic)
+{
+ struct queue_set *qs;
+
+ qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
+ if (!qs)
+ return -ENOMEM;
+ nic->qs = qs;
+
+ /* Set count of each queue */
+ qs->rbdr_cnt = RBDR_CNT;
+ qs->rq_cnt = RCV_QUEUE_CNT;
+ qs->sq_cnt = SND_QUEUE_CNT;
+ qs->cq_cnt = CMP_QUEUE_CNT;
+
+ /* Set queue lengths */
+ qs->rbdr_len = RCV_BUF_COUNT;
+ qs->sq_len = SND_QUEUE_LEN;
+ qs->cq_len = CMP_QUEUE_LEN;
+ return 0;
+}
+
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+{
+ bool disable = false;
+ struct queue_set *qs = nic->qs;
+ int qidx;
+
+ if (!qs)
+ return 0;
+
+ if (enable) {
+ if (nicvf_alloc_resources(nic))
+ return -ENOMEM;
+
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, enable);
+ } else {
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+ nicvf_free_resources(nic);
+ }
+
+ return 0;
+}
+
+/* Reserve free descriptors in the SQ
+ * and return the index of the first one
+ */
+static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ int qentry;
+
+ qentry = sq->tail;
+ atomic_sub(desc_cnt, &sq->free_cnt);
+ sq->tail += desc_cnt;
+ sq->tail &= (sq->dmem.q_len - 1);
+
+ return qentry;
+}
+
+/* Free descriptor back to SQ for future use */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ atomic_add(desc_cnt, &sq->free_cnt);
+ sq->head += desc_cnt;
+ sq->head &= (sq->dmem.q_len - 1);
+}
+
+static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+ qentry++;
+ qentry &= (sq->dmem.q_len - 1);
+ return qentry;
+}
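+
+/* The head/tail arithmetic above relies on q_len being a power of two;
+ * e.g. with q_len == 16384 (SND_QUEUE_LEN), index 16383 + 1 wraps to 0
+ * via the (q_len - 1) mask. Illustration only.
+ */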
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg |= NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+ /* Ring doorbell so that H/W restarts processing SQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg &= ~NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
+ int qidx)
+{
+ u64 head, tail;
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct sq_hdr_subdesc *hdr;
+
+ head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
+ while (sq->head != head) {
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+ nicvf_put_sq_desc(sq, 1);
+ continue;
+ }
+ skb = (struct sk_buff *)sq->skbuff[sq->head];
+ atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
+ atomic64_add(hdr->tot_len,
+ (atomic64_t *)&netdev->stats.tx_bytes);
+ dev_kfree_skb_any(skb);
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ }
+}
+
+/* Calculate the number of SQ subdescriptors needed to transmit all
+ * segments of this TSO packet.
+ * Taken from the 'Tilera network driver' with a minor modification.
+ */
+static int nicvf_tso_count_subdescs(struct sk_buff *skb)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->len - sh_len;
+ unsigned int p_len = sh->gso_size;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
+ long f_used = 0; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int num_edescs = 0;
+ int segment;
+
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ unsigned int p_used = 0;
+
+ /* One edesc for header and for each piece of the payload. */
+ for (num_edescs++; p_used < p_len; num_edescs++) {
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = skb_frag_size(&sh->frags[f_id]);
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+ }
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* '+ gso_segs' for one SQ_HDR_SUBDESC per segment */
+ return num_edescs + sh->gso_segs;
+}
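+
+/* Worked example (illustrative): a linear TSO skb with a 54-byte
+ * protocol header, gso_size 1448 and 4344 bytes of payload gives
+ * gso_segs = 3; each segment costs one header edesc plus one payload
+ * edesc (the payload is a single piece here), so num_edescs = 6 and
+ * the function returns 6 + 3 = 9 subdescriptors.
+ */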
+
+/* Get the number of SQ descriptors needed to xmit this skb */
+static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
+{
+ int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
+ if (skb_shinfo(skb)->gso_size) {
+ subdesc_cnt = nicvf_tso_count_subdescs(skb);
+ return subdesc_cnt;
+ }
+
+ if (skb_shinfo(skb)->nr_frags)
+ subdesc_cnt += skb_shinfo(skb)->nr_frags;
+
+ return subdesc_cnt;
+}
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor of every packet's descriptor chain.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+ int subdesc_cnt, struct sk_buff *skb, int len)
+{
+ int proto;
+ struct sq_hdr_subdesc *hdr;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ sq->skbuff[qentry] = (u64)skb;
+
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* Number of subdescriptors following this one */
+ hdr->subdesc_cnt = subdesc_cnt;
+ hdr->tot_len = len;
+
+ /* Offload checksum calculation to HW */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+
+ hdr->csum_l3 = 1; /* Enable IP csum calculation */
+ hdr->l3_offset = skb_network_offset(skb);
+ hdr->l4_offset = skb_transport_offset(skb);
+
+ proto = ip_hdr(skb)->protocol;
+ switch (proto) {
+ case IPPROTO_TCP:
+ hdr->csum_l4 = SEND_L4_CSUM_TCP;
+ break;
+ case IPPROTO_UDP:
+ hdr->csum_l4 = SEND_L4_CSUM_UDP;
+ break;
+ case IPPROTO_SCTP:
+ hdr->csum_l4 = SEND_L4_CSUM_SCTP;
+ break;
+ }
+ }
+}
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+ int size, u64 data)
+{
+ struct sq_gather_subdesc *gather;
+
+ qentry &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+ memset(gather, 0, SND_QUEUE_DESC_SIZE);
+ gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+ gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
+ gather->size = size;
+ gather->addr = data;
+}
+
+/* Segment a TSO packet into 'gso_size' segments and append
+ * them to SQ for transfer
+ */
+static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
+ int qentry, struct sk_buff *skb)
+{
+ struct tso_t tso;
+ int seg_subdescs = 0, desc_cnt = 0;
+ int seg_len, total_len, data_left;
+ int hdr_qentry = qentry;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ tso_start(skb, &tso);
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Save Qentry for adding HDR_SUBDESC at the end */
+ hdr_qentry = qentry;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* Add segment's header */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
+ sq->tso_hdrs_phys +
+ qentry * TSO_HEADER_SIZE);
+ /* HDR_SUBDESC + GATHER */
+ seg_subdescs = 2;
+ seg_len = hdr_len;
+
+ /* Add segment's payload fragments */
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_gather_subdesc(sq, qentry, size,
+ virt_to_phys(tso.data));
+ seg_subdescs++;
+ seg_len += size;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+ seg_subdescs - 1, skb, seg_len);
+ sq->skbuff[hdr_qentry] = 0;
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+
+ desc_cnt += seg_subdescs;
+ }
+ /* Save SKB in the last segment for freeing */
+ sq->skbuff[hdr_qentry] = (u64)skb;
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit all TSO segments */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ skb_get_queue_mapping(skb), desc_cnt);
+ return 1;
+}
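+
+/* Resulting SQ layout per TSO segment (illustration):
+ *   [SQ_HDR_SUBDESC][GATHER: copied headers][GATHER: payload]...
+ * The header subdescriptor is written last, at the saved hdr_qentry,
+ * once seg_subdescs is known.
+ */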
+
+/* Append an skb to a SQ for packet transfer. */
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
+{
+ int i, size;
+ int subdesc_cnt;
+ int sq_num, qentry;
+ struct queue_set *qs = nic->qs;
+ struct snd_queue *sq;
+
+ sq_num = skb_get_queue_mapping(skb);
+ sq = &qs->sq[sq_num];
+
+ subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
+ if (subdesc_cnt > atomic_read(&sq->free_cnt))
+ goto append_fail;
+
+ qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+ /* Check if it's a TSO packet */
+ if (skb_shinfo(skb)->gso_size)
+ return nicvf_sq_append_tso(nic, sq, qentry, skb);
+
+ /* Add SQ header subdesc */
+ nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+
+ /* Add SQ gather subdescs */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+
+ /* Check for scattered buffer */
+ if (!skb_is_nonlinear(skb))
+ goto doorbell;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[i];
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_frag_size(frag);
+ nicvf_sq_add_gather_subdesc(sq, qentry, size,
+ virt_to_phys(
+ skb_frag_address(frag)));
+ }
+
+doorbell:
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit new packet */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, subdesc_cnt);
+ return 1;
+
+append_fail:
+ netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
+ return 0;
+}
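+
+/* For a non-TSO skb the resulting layout is
+ *   [SQ_HDR_SUBDESC][GATHER: linear data][GATHER: frag]...
+ * i.e. MIN_SQ_DESC_PER_PKT_XMIT (2) subdescriptors plus one per page
+ * fragment, matching nicvf_sq_subdesc_required() above.
+ */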
+
+static inline unsigned frag_num(unsigned i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
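+
+/* frag_num() compensates for the endianness of the rb_lens array: on
+ * big-endian the four u16 sizes within each 64-bit word are reversed,
+ * so indices 0,1,2,3 map to 3,2,1,0 and 4,5,6,7 map to 7,6,5,4.
+ */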
+
+/* Returns SKB for a received packet */
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+{
+ int frag;
+ int payload_len = 0;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *skb_frag = NULL;
+ struct sk_buff *prev_frag = NULL;
+ u16 *rb_lens = NULL;
+ u64 *rb_ptrs = NULL;
+
+ rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
+ rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+
+ netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
+ __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
+
+ for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
+ payload_len = rb_lens[frag_num(frag)];
+ if (!frag) {
+ /* First fragment */
+ skb = nicvf_rb_ptr_to_skb(nic,
+ *rb_ptrs - cqe_rx->align_pad,
+ payload_len);
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, cqe_rx->align_pad);
+ skb_put(skb, payload_len);
+ } else {
+ /* Add fragments */
+ skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
+ payload_len);
+ if (!skb_frag) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ if (!skb_shinfo(skb)->frag_list)
+ skb_shinfo(skb)->frag_list = skb_frag;
+ else
+ prev_frag->next = skb_frag;
+
+ prev_frag = skb_frag;
+ skb->len += payload_len;
+ skb->data_len += payload_len;
+ skb_frag->len = payload_len;
+ }
+ /* Next buffer pointer */
+ rb_ptrs++;
+ }
+ return skb;
+}
+
+/* Enable interrupt */
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val;
+
+ reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to enable interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
+}
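+
+/* Example (illustrative): enabling NICVF_INTR_CQ for q_idx 2 sets bit
+ * NICVF_INTR_CQ_SHIFT + 2 in NIC_VF_ENA_W1S (write-1-to-set); writing
+ * the same bit to NIC_VF_ENA_W1C below clears the enable.
+ */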
+
+/* Disable interrupt */
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val = 0;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to disable interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
+}
+
+/* Clear interrupt */
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val = 0;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to clear interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+}
+
+/* Check if interrupt is enabled */
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val;
+ u64 mask = 0xff;
+
+ reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ mask = NICVF_INTR_PKT_DROP_MASK;
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ mask = NICVF_INTR_TCP_TIMER_MASK;
+ break;
+ case NICVF_INTR_MBOX:
+ mask = NICVF_INTR_MBOX_MASK;
+ break;
+ case NICVF_INTR_QS_ERR:
+ mask = NICVF_INTR_QS_ERR_MASK;
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to check interrupt enable: unknown type\n");
+ break;
+ }
+
+ return (reg_val & mask);
+}
+
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
+{
+ struct rcv_queue *rq;
+
+#define GET_RQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
+ (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ rq = &nic->qs->rq[rq_idx];
+ rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
+ rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
+{
+ struct snd_queue *sq;
+
+#define GET_SQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
+ (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ sq = &nic->qs->sq[sq_idx];
+ sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
+ sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Check for errors in the receive completion queue entry */
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+{
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
+ stats->rx.errop.good++;
+ return 0;
+ }
+
+ if (netif_msg_rx_err(nic))
+ netdev_err(nic->netdev,
+ "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
+ nic->netdev->name,
+ cqe_rx->err_level, cqe_rx->err_opcode);
+
+ switch (cqe_rx->err_level) {
+ case CQ_ERRLVL_MAC:
+ stats->rx.errlvl.mac_errs++;
+ break;
+ case CQ_ERRLVL_L2:
+ stats->rx.errlvl.l2_errs++;
+ break;
+ case CQ_ERRLVL_L3:
+ stats->rx.errlvl.l3_errs++;
+ break;
+ case CQ_ERRLVL_L4:
+ stats->rx.errlvl.l4_errs++;
+ break;
+ }
+
+ switch (cqe_rx->err_opcode) {
+ case CQ_RX_ERROP_RE_PARTIAL:
+ stats->rx.errop.partial_pkts++;
+ break;
+ case CQ_RX_ERROP_RE_JABBER:
+ stats->rx.errop.jabber_errs++;
+ break;
+ case CQ_RX_ERROP_RE_FCS:
+ stats->rx.errop.fcs_errs++;
+ break;
+ case CQ_RX_ERROP_RE_TERMINATE:
+ stats->rx.errop.terminate_errs++;
+ break;
+ case CQ_RX_ERROP_RE_RX_CTL:
+ stats->rx.errop.bgx_rx_errs++;
+ break;
+ case CQ_RX_ERROP_PREL2_ERR:
+ stats->rx.errop.prel2_errs++;
+ break;
+ case CQ_RX_ERROP_L2_FRAGMENT:
+ stats->rx.errop.l2_frags++;
+ break;
+ case CQ_RX_ERROP_L2_OVERRUN:
+ stats->rx.errop.l2_overruns++;
+ break;
+ case CQ_RX_ERROP_L2_PFCS:
+ stats->rx.errop.l2_pfcs++;
+ break;
+ case CQ_RX_ERROP_L2_PUNY:
+ stats->rx.errop.l2_puny++;
+ break;
+ case CQ_RX_ERROP_L2_MAL:
+ stats->rx.errop.l2_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_L2_OVERSIZE:
+ stats->rx.errop.l2_oversize++;
+ break;
+ case CQ_RX_ERROP_L2_UNDERSIZE:
+ stats->rx.errop.l2_undersize++;
+ break;
+ case CQ_RX_ERROP_L2_LENMISM:
+ stats->rx.errop.l2_len_mismatch++;
+ break;
+ case CQ_RX_ERROP_L2_PCLP:
+ stats->rx.errop.l2_pclp++;
+ break;
+ case CQ_RX_ERROP_IP_NOT:
+ stats->rx.errop.non_ip++;
+ break;
+ case CQ_RX_ERROP_IP_CSUM_ERR:
+ stats->rx.errop.ip_csum_err++;
+ break;
+ case CQ_RX_ERROP_IP_MAL:
+ stats->rx.errop.ip_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_MALD:
+ stats->rx.errop.ip_payload_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_HOP:
+ stats->rx.errop.ip_hop_errs++;
+ break;
+ case CQ_RX_ERROP_L3_ICRC:
+ stats->rx.errop.l3_icrc_errs++;
+ break;
+ case CQ_RX_ERROP_L3_PCLP:
+ stats->rx.errop.l3_pclp++;
+ break;
+ case CQ_RX_ERROP_L4_MAL:
+ stats->rx.errop.l4_malformed++;
+ break;
+ case CQ_RX_ERROP_L4_CHK:
+ stats->rx.errop.l4_csum_errs++;
+ break;
+ case CQ_RX_ERROP_UDP_LEN:
+ stats->rx.errop.udp_len_err++;
+ break;
+ case CQ_RX_ERROP_L4_PORT:
+ stats->rx.errop.bad_l4_port++;
+ break;
+ case CQ_RX_ERROP_TCP_FLAG:
+ stats->rx.errop.bad_tcp_flag++;
+ break;
+ case CQ_RX_ERROP_TCP_OFFSET:
+ stats->rx.errop.tcp_offset_errs++;
+ break;
+ case CQ_RX_ERROP_L4_PCLP:
+ stats->rx.errop.l4_pclp++;
+ break;
+ case CQ_RX_ERROP_RBDR_TRUNC:
+ stats->rx.errop.pkt_truncated++;
+ break;
+ }
+
+ return 1;
+}
+
+/* Check for errors in the send completion queue entry */
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+{
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ switch (cqe_tx->send_status) {
+ case CQ_TX_ERROP_GOOD:
+ stats->tx.good++;
+ return 0;
+ case CQ_TX_ERROP_DESC_FAULT:
+ stats->tx.desc_fault++;
+ break;
+ case CQ_TX_ERROP_HDR_CONS_ERR:
+ stats->tx.hdr_cons_err++;
+ break;
+ case CQ_TX_ERROP_SUBDC_ERR:
+ stats->tx.subdesc_err++;
+ break;
+ case CQ_TX_ERROP_IMM_SIZE_OFLOW:
+ stats->tx.imm_size_oflow++;
+ break;
+ case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
+ stats->tx.data_seq_err++;
+ break;
+ case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
+ stats->tx.mem_seq_err++;
+ break;
+ case CQ_TX_ERROP_LOCK_VIOL:
+ stats->tx.lock_viol++;
+ break;
+ case CQ_TX_ERROP_DATA_FAULT:
+ stats->tx.data_fault++;
+ break;
+ case CQ_TX_ERROP_TSTMP_CONFLICT:
+ stats->tx.tstmp_conflict++;
+ break;
+ case CQ_TX_ERROP_TSTMP_TIMEOUT:
+ stats->tx.tstmp_timeout++;
+ break;
+ case CQ_TX_ERROP_MEM_FAULT:
+ stats->tx.mem_fault++;
+ break;
+ case CQ_TX_ERROP_CK_OVERLAP:
+ stats->tx.csum_overlap++;
+ break;
+ case CQ_TX_ERROP_CK_OFLOW:
+ stats->tx.csum_overflow++;
+ break;
+ }
+
+ return 1;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
new file mode 100644
index 000000000000..8341bdf755d1
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NICVF_QUEUES_H
+#define NICVF_QUEUES_H
+
+#include <linux/netdevice.h>
+#include "q_struct.h"
+
+#define MAX_QUEUE_SET 128
+#define MAX_RCV_QUEUES_PER_QS 8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
+#define MAX_SND_QUEUES_PER_QS 8
+#define MAX_CMP_QUEUES_PER_QS 8
+
+/* VF's queue interrupt ranges */
+#define NICVF_INTR_ID_CQ 0
+#define NICVF_INTR_ID_SQ 8
+#define NICVF_INTR_ID_RBDR 16
+#define NICVF_INTR_ID_MISC 18
+#define NICVF_INTR_ID_QS_ERR 19
+
+#define for_each_cq_irq(irq) \
+ for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
+#define for_each_sq_irq(irq) \
+ for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
+#define for_each_rbdr_irq(irq) \
+ for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
+
+#define RBDR_SIZE0 0ULL /* 8K entries */
+#define RBDR_SIZE1 1ULL /* 16K entries */
+#define RBDR_SIZE2 2ULL /* 32K entries */
+#define RBDR_SIZE3 3ULL /* 64K entries */
+#define RBDR_SIZE4 4ULL /* 128K entries */
+#define RBDR_SIZE5 5ULL /* 256K entries */
+#define RBDR_SIZE6 6ULL /* 512K entries */
+
+#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
+#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
+#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
+#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
+#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
+#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
+#define SND_QUEUE_SIZE6 6ULL /* 64K entries */
+
+#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
+#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
+#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
+#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
+#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
+#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
+#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
+
+/* Default queue counts per QS, their lengths and threshold values */
+#define RBDR_CNT 1
+#define RCV_QUEUE_CNT 8
+#define SND_QUEUE_CNT 8
+#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
+
+#define SND_QSIZE SND_QUEUE_SIZE4
+#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
+#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
+#define SND_QUEUE_THRESH 2ULL
+#define MIN_SQ_DESC_PER_PKT_XMIT 2
+/* 1 since timestamping is not enabled, otherwise 2 */
+#define MAX_CQE_PER_PKT_XMIT 1
+
+#define CMP_QSIZE CMP_QUEUE_SIZE4
+#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
+#define CMP_QUEUE_CQE_THRESH 0
+#define CMP_QUEUE_TIMER_THRESH 220 /* 10 usec */
+
+#define RBDR_SIZE RBDR_SIZE0
+#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
+#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
+#define RBDR_THRESH (RCV_BUF_COUNT / 2)
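+/* With RBDR_SIZE0 the above works out to RCV_BUF_COUNT = 1 << 13 = 8192
+ * receive buffers and an interrupt threshold of 4096 (derived values,
+ * shown for illustration).
+ */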
+#define DMA_BUFFER_LEN 2048 /* In multiples of 128 bytes */
+#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
+ (NICVF_RCV_BUF_ALIGN_BYTES * 2))
+#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES
+
+#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
+ MAX_CQE_PER_PKT_XMIT)
+#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
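+/* With the defaults above (derived, for illustration): SND_QUEUE_LEN =
+ * 16384, so MAX_CQES_FOR_TX = (16384 / 2) * 1 = 8192; with
+ * CMP_QUEUE_LEN = 16384 the drop level is (16384 - 8192) / 256 = 32.
+ */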
+
+/* Descriptor size in bytes */
+#define SND_QUEUE_DESC_SIZE 16
+#define CMP_QUEUE_DESC_SIZE 512
+
+/* Buffer / descriptor alignments */
+#define NICVF_RCV_BUF_ALIGN 7
+#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN)
+#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
+
+#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
+#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
+ (NICVF_ALIGNED_ADDR(ADDR, BYTES) - (ADDR))
+#define NICVF_RCV_BUF_ALIGN_LEN(X)\
+ (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
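+/* Example (illustrative): for X = 0x1010, NICVF_RCV_BUF_ALIGN_LEN(X)
+ * is ALIGN(0x1010, 128) - 0x1010 = 0x1080 - 0x1010 = 0x70, the padding
+ * needed to reach the next cache-line boundary; it is 0 when X is
+ * already aligned.
+ */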
+
+/* Queue enable/disable */
+#define NICVF_SQ_EN BIT_ULL(19)
+
+/* Queue reset */
+#define NICVF_CQ_RESET BIT_ULL(41)
+#define NICVF_SQ_RESET BIT_ULL(17)
+#define NICVF_RBDR_RESET BIT_ULL(43)
+
+enum CQ_RX_ERRLVL_E {
+ CQ_ERRLVL_MAC,
+ CQ_ERRLVL_L2,
+ CQ_ERRLVL_L3,
+ CQ_ERRLVL_L4,
+};
+
+enum CQ_RX_ERROP_E {
+ CQ_RX_ERROP_RE_NONE = 0x0,
+ CQ_RX_ERROP_RE_PARTIAL = 0x1,
+ CQ_RX_ERROP_RE_JABBER = 0x2,
+ CQ_RX_ERROP_RE_FCS = 0x7,
+ CQ_RX_ERROP_RE_TERMINATE = 0x9,
+ CQ_RX_ERROP_RE_RX_CTL = 0xb,
+ CQ_RX_ERROP_PREL2_ERR = 0x1f,
+ CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+ CQ_RX_ERROP_L2_OVERRUN = 0x21,
+ CQ_RX_ERROP_L2_PFCS = 0x22,
+ CQ_RX_ERROP_L2_PUNY = 0x23,
+ CQ_RX_ERROP_L2_MAL = 0x24,
+ CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+ CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+ CQ_RX_ERROP_L2_LENMISM = 0x27,
+ CQ_RX_ERROP_L2_PCLP = 0x28,
+ CQ_RX_ERROP_IP_NOT = 0x41,
+ CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+ CQ_RX_ERROP_IP_MAL = 0x43,
+ CQ_RX_ERROP_IP_MALD = 0x44,
+ CQ_RX_ERROP_IP_HOP = 0x45,
+ CQ_RX_ERROP_L3_ICRC = 0x46,
+ CQ_RX_ERROP_L3_PCLP = 0x47,
+ CQ_RX_ERROP_L4_MAL = 0x61,
+ CQ_RX_ERROP_L4_CHK = 0x62,
+ CQ_RX_ERROP_UDP_LEN = 0x63,
+ CQ_RX_ERROP_L4_PORT = 0x64,
+ CQ_RX_ERROP_TCP_FLAG = 0x65,
+ CQ_RX_ERROP_TCP_OFFSET = 0x66,
+ CQ_RX_ERROP_L4_PCLP = 0x67,
+ CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum CQ_TX_ERROP_E {
+ CQ_TX_ERROP_GOOD = 0x0,
+ CQ_TX_ERROP_DESC_FAULT = 0x10,
+ CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+ CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+ CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+ CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+ CQ_TX_ERROP_LOCK_VIOL = 0x83,
+ CQ_TX_ERROP_DATA_FAULT = 0x84,
+ CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+ CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+ CQ_TX_ERROP_MEM_FAULT = 0x87,
+ CQ_TX_ERROP_CK_OVERLAP = 0x88,
+ CQ_TX_ERROP_CK_OFLOW = 0x89,
+ CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+struct cmp_queue_stats {
+ struct rx_stats {
+ struct {
+ u64 mac_errs;
+ u64 l2_errs;
+ u64 l3_errs;
+ u64 l4_errs;
+ } errlvl;
+ struct {
+ u64 good;
+ u64 partial_pkts;
+ u64 jabber_errs;
+ u64 fcs_errs;
+ u64 terminate_errs;
+ u64 bgx_rx_errs;
+ u64 prel2_errs;
+ u64 l2_frags;
+ u64 l2_overruns;
+ u64 l2_pfcs;
+ u64 l2_puny;
+ u64 l2_hdr_malformed;
+ u64 l2_oversize;
+ u64 l2_undersize;
+ u64 l2_len_mismatch;
+ u64 l2_pclp;
+ u64 non_ip;
+ u64 ip_csum_err;
+ u64 ip_hdr_malformed;
+ u64 ip_payload_malformed;
+ u64 ip_hop_errs;
+ u64 l3_icrc_errs;
+ u64 l3_pclp;
+ u64 l4_malformed;
+ u64 l4_csum_errs;
+ u64 udp_len_err;
+ u64 bad_l4_port;
+ u64 bad_tcp_flag;
+ u64 tcp_offset_errs;
+ u64 l4_pclp;
+ u64 pkt_truncated;
+ } errop;
+ } rx;
+ struct tx_stats {
+ u64 good;
+ u64 desc_fault;
+ u64 hdr_cons_err;
+ u64 subdesc_err;
+ u64 imm_size_oflow;
+ u64 data_seq_err;
+ u64 mem_seq_err;
+ u64 lock_viol;
+ u64 data_fault;
+ u64 tstmp_conflict;
+ u64 tstmp_timeout;
+ u64 mem_fault;
+ u64 csum_overlap;
+ u64 csum_overflow;
+ } tx;
+} ____cacheline_aligned_in_smp;
+
+enum RQ_SQ_STATS {
+ RQ_SQ_STATS_OCTS,
+ RQ_SQ_STATS_PKTS,
+};
+
+struct rx_tx_queue_stats {
+ u64 bytes;
+ u64 pkts;
+} ____cacheline_aligned_in_smp;
+
+struct q_desc_mem {
+ dma_addr_t dma;
+ u64 size;
+ u16 q_len;
+ dma_addr_t phys_base;
+ void *base;
+ void *unalign_base;
+};
+
+struct rbdr {
+ bool enable;
+ u32 dma_size;
+ u32 frag_len;
+ u32 thresh; /* Threshold level for interrupt */
+ void *desc;
+ u32 head;
+ u32 tail;
+ struct q_desc_mem dmem;
+} ____cacheline_aligned_in_smp;
+
+struct rcv_queue {
+ bool enable;
+ struct rbdr *rbdr_start;
+ struct rbdr *rbdr_cont;
+ bool en_tcp_reassembly;
+ u8 cq_qs; /* CQ's QS to which this RQ is assigned */
+ u8 cq_idx; /* CQ index (0 to 7) in the QS */
+ u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
+ u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
+ u8 start_rbdr_qs; /* First buffer ptrs - QS num */
+ u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
+ u8 caching;
+ struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct cmp_queue {
+ bool enable;
+ u16 thresh;
+ spinlock_t lock; /* lock to serialize processing CQEs */
+ void *desc;
+ struct q_desc_mem dmem;
+ struct cmp_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct snd_queue {
+ bool enable;
+ u8 cq_qs; /* CQ's QS to which this SQ is pointing */
+ u8 cq_idx; /* CQ index (0 to 7) in the above QS */
+ u16 thresh;
+ atomic_t free_cnt;
+ u32 head;
+ u32 tail;
+ u64 *skbuff;
+ void *desc;
+
+#define TSO_HEADER_SIZE 128
+ /* For TSO segment's header */
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_phys;
+
+ cpumask_t affinity_mask;
+ struct q_desc_mem dmem;
+ struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct queue_set {
+ bool enable;
+ bool be_en;
+ u8 vnic_id;
+ u8 rq_cnt;
+ u8 cq_cnt;
+ u64 cq_len;
+ u8 sq_cnt;
+ u64 sq_len;
+ u8 rbdr_cnt;
+ u64 rbdr_len;
+ struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
+ struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
+ struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
+ struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
+} ____cacheline_aligned_in_smp;
+
+#define GET_RBDR_DESC(RING, idx)\
+ (&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
+#define GET_SQ_DESC(RING, idx)\
+ (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
+#define GET_CQ_DESC(RING, idx)\
+ (&(((union cq_desc_t *)((RING)->desc))[idx]))
+
+/* CQ status bits */
+#define CQ_WR_FULL BIT(26)
+#define CQ_WR_DISABLE BIT(25)
+#define CQ_WR_FAULT BIT(24)
+#define CQ_CQE_COUNT (0xFFFF << 0)
+
+#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+
+int nicvf_set_qset_resources(struct nicvf *nic);
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
+void nicvf_qset_config(struct nicvf *nic, bool enable);
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable);
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
+void nicvf_sq_disable(struct nicvf *nic, int qidx);
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+void nicvf_sq_free_used_descs(struct net_device *netdev,
+ struct snd_queue *sq, int qidx);
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
+
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+void nicvf_rbdr_task(unsigned long data);
+void nicvf_rbdr_work(struct work_struct *work);
+
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
+
+/* Register access APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val);
+u64 nicvf_queue_reg_read(struct nicvf *nic,
+ u64 offset, u64 qidx);
+
+/* Stats */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
new file mode 100644
index 000000000000..3c1de97b1add
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -0,0 +1,701 @@
+/*
+ * This file contains HW queue descriptor formats, config register
+ * structures, etc.
+ *
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef Q_STRUCT_H
+#define Q_STRUCT_H
+
+/* Load transaction types for reading segment bytes specified by
+ * NIC_SEND_GATHER_S[LD_TYPE].
+ */
+enum nic_send_ld_type_e {
+ NIC_SEND_LD_TYPE_E_LDD = 0x0,
+ NIC_SEND_LD_TYPE_E_LDT = 0x1,
+ NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+ NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+enum ether_type_algorithm {
+ ETYPE_ALG_NONE = 0x0,
+ ETYPE_ALG_SKIP = 0x1,
+ ETYPE_ALG_ENDPARSE = 0x2,
+ ETYPE_ALG_VLAN = 0x3,
+ ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+enum layer3_type {
+ L3TYPE_NONE = 0x00,
+ L3TYPE_GRH = 0x01,
+ L3TYPE_IPV4 = 0x04,
+ L3TYPE_IPV4_OPTIONS = 0x05,
+ L3TYPE_IPV6 = 0x06,
+ L3TYPE_IPV6_OPTIONS = 0x07,
+ L3TYPE_ET_STOP = 0x0D,
+ L3TYPE_OTHER = 0x0E,
+};
+
+enum layer4_type {
+ L4TYPE_NONE = 0x00,
+ L4TYPE_IPSEC_ESP = 0x01,
+ L4TYPE_IPFRAG = 0x02,
+ L4TYPE_IPCOMP = 0x03,
+ L4TYPE_TCP = 0x04,
+ L4TYPE_UDP = 0x05,
+ L4TYPE_SCTP = 0x06,
+ L4TYPE_GRE = 0x07,
+ L4TYPE_ROCE_BTH = 0x08,
+ L4TYPE_OTHER = 0x0E,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+ CPI_ALG_NONE = 0x0,
+ CPI_ALG_VLAN = 0x1,
+ CPI_ALG_VLAN16 = 0x2,
+ CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+ RSS_ALG_NONE = 0x00,
+ RSS_ALG_PORT = 0x01,
+ RSS_ALG_IP = 0x02,
+ RSS_ALG_TCP_IP = 0x03,
+ RSS_ALG_UDP_IP = 0x04,
+ RSS_ALG_SCTP_IP = 0x05,
+ RSS_ALG_GRE_IP = 0x06,
+ RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+ RSS_HASH_L2ETC = 0x00,
+ RSS_HASH_IP = 0x01,
+ RSS_HASH_TCP = 0x02,
+ RSS_HASH_TCP_SYN_DIS = 0x03,
+ RSS_HASH_UDP = 0x04,
+ RSS_HASH_L4ETC = 0x05,
+ RSS_HASH_ROCE = 0x06,
+ RSS_L3_BIDI = 0x07,
+ RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+ CQE_TYPE_INVALID = 0x0,
+ CQE_TYPE_RX = 0x2,
+ CQE_TYPE_RX_SPLIT = 0x3,
+ CQE_TYPE_RX_TCP = 0x4,
+ CQE_TYPE_SEND = 0x8,
+ CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+ CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+ CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+ CQE_SEND_STATUS_GOOD = 0x00,
+ CQE_SEND_STATUS_DESC_FAULT = 0x01,
+ CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+ CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+ CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+ CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+ CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+ CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+ CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+ CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+ CQE_SEND_STATUS_DATA_FAULT = 0x86,
+ CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+ CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+ CQE_SEND_STATUS_MEM_FAULT = 0x89,
+ CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+ CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+ CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+ CQE_RX_TCP_END_INVALID_FLAG = 1,
+ CQE_RX_TCP_END_TIMEOUT = 2,
+ CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+ CQE_RX_TCP_END_PKT_ERR = 4,
+ CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+ CQE_RX_ERRLVL_RE = 0x0,
+ CQE_RX_ERRLVL_L2 = 0x1,
+ CQE_RX_ERRLVL_L3 = 0x2,
+ CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+ CQE_RX_ERR_RE_NONE = 0x0,
+ CQE_RX_ERR_RE_PARTIAL = 0x1,
+ CQE_RX_ERR_RE_JABBER = 0x2,
+ CQE_RX_ERR_RE_FCS = 0x7,
+ CQE_RX_ERR_RE_TERMINATE = 0x9,
+ CQE_RX_ERR_RE_RX_CTL = 0xb,
+ CQE_RX_ERR_PREL2_ERR = 0x1f,
+ CQE_RX_ERR_L2_FRAGMENT = 0x20,
+ CQE_RX_ERR_L2_OVERRUN = 0x21,
+ CQE_RX_ERR_L2_PFCS = 0x22,
+ CQE_RX_ERR_L2_PUNY = 0x23,
+ CQE_RX_ERR_L2_MAL = 0x24,
+ CQE_RX_ERR_L2_OVERSIZE = 0x25,
+ CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+ CQE_RX_ERR_L2_LENMISM = 0x27,
+ CQE_RX_ERR_L2_PCLP = 0x28,
+ CQE_RX_ERR_IP_NOT = 0x41,
+ CQE_RX_ERR_IP_CHK = 0x42,
+ CQE_RX_ERR_IP_MAL = 0x43,
+ CQE_RX_ERR_IP_MALD = 0x44,
+ CQE_RX_ERR_IP_HOP = 0x45,
+ CQE_RX_ERR_L3_ICRC = 0x46,
+ CQE_RX_ERR_L3_PCLP = 0x47,
+ CQE_RX_ERR_L4_MAL = 0x61,
+ CQE_RX_ERR_L4_CHK = 0x62,
+ CQE_RX_ERR_UDP_LEN = 0x63,
+ CQE_RX_ERR_L4_PORT = 0x64,
+ CQE_RX_ERR_TCP_FLAG = 0x65,
+ CQE_RX_ERR_TCP_OFFSET = 0x66,
+ CQE_RX_ERR_L4_PCLP = 0x67,
+ CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+struct cqe_rx_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 stdn_fault:1;
+ u64 rsvd0:1;
+ u64 rq_qs:7;
+ u64 rq_idx:3;
+ u64 rsvd1:12;
+ u64 rss_alg:4;
+ u64 rsvd2:4;
+ u64 rb_cnt:4;
+ u64 vlan_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan2_stripped:1;
+ u64 l4_type:4;
+ u64 l3_type:4;
+ u64 l2_present:1;
+ u64 err_level:3;
+ u64 err_opcode:8;
+
+ u64 pkt_len:16; /* W1 */
+ u64 l2_ptr:8;
+ u64 l3_ptr:8;
+ u64 l4_ptr:8;
+ u64 cq_pkt_len:8;
+ u64 align_pad:3;
+ u64 rsvd3:1;
+ u64 chan:12;
+
+ u64 rss_tag:32; /* W2 */
+ u64 vlan_tci:16;
+ u64 vlan_ptr:8;
+ u64 vlan2_ptr:8;
+
+ u64 rb3_sz:16; /* W3 */
+ u64 rb2_sz:16;
+ u64 rb1_sz:16;
+ u64 rb0_sz:16;
+
+ u64 rb7_sz:16; /* W4 */
+ u64 rb6_sz:16;
+ u64 rb5_sz:16;
+ u64 rb4_sz:16;
+
+ u64 rb11_sz:16; /* W5 */
+ u64 rb10_sz:16;
+ u64 rb9_sz:16;
+ u64 rb8_sz:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 err_opcode:8;
+ u64 err_level:3;
+ u64 l2_present:1;
+ u64 l3_type:4;
+ u64 l4_type:4;
+ u64 vlan2_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan_found:1;
+ u64 rb_cnt:4;
+ u64 rsvd2:4;
+ u64 rss_alg:4;
+ u64 rsvd1:12;
+ u64 rq_idx:3;
+ u64 rq_qs:7;
+ u64 rsvd0:1;
+ u64 stdn_fault:1;
+ u64 cqe_type:4; /* W0 */
+ u64 chan:12;
+ u64 rsvd3:1;
+ u64 align_pad:3;
+ u64 cq_pkt_len:8;
+ u64 l4_ptr:8;
+ u64 l3_ptr:8;
+ u64 l2_ptr:8;
+ u64 pkt_len:16; /* W1 */
+ u64 vlan2_ptr:8;
+ u64 vlan_ptr:8;
+ u64 vlan_tci:16;
+ u64 rss_tag:32; /* W2 */
+ u64 rb0_sz:16;
+ u64 rb1_sz:16;
+ u64 rb2_sz:16;
+ u64 rb3_sz:16; /* W3 */
+ u64 rb4_sz:16;
+ u64 rb5_sz:16;
+ u64 rb6_sz:16;
+ u64 rb7_sz:16; /* W4 */
+ u64 rb8_sz:16;
+ u64 rb9_sz:16;
+ u64 rb10_sz:16;
+ u64 rb11_sz:16; /* W5 */
+#endif
+ u64 rb0_ptr:64;
+ u64 rb1_ptr:64;
+ u64 rb2_ptr:64;
+ u64 rb3_ptr:64;
+ u64 rb4_ptr:64;
+ u64 rb5_ptr:64;
+ u64 rb6_ptr:64;
+ u64 rb7_ptr:64;
+ u64 rb8_ptr:64;
+ u64 rb9_ptr:64;
+ u64 rb10_ptr:64;
+ u64 rb11_ptr:64;
+};
+
+struct cqe_rx_tcp_err_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:60;
+
+ u64 rsvd1:4; /* W1 */
+ u64 partial_first:1;
+ u64 rsvd2:27;
+ u64 rbdr_bytes:8;
+ u64 rsvd3:24;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 rsvd0:60;
+ u64 cqe_type:4;
+
+ u64 rsvd3:24;
+ u64 rbdr_bytes:8;
+ u64 rsvd2:27;
+ u64 partial_first:1;
+ u64 rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:52;
+ u64 cq_tcp_status:8;
+
+ u64 rsvd1:32; /* W1 */
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd2:8;
+ u64 tcp_err_bytes:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cq_tcp_status:8;
+ u64 rsvd0:52;
+ u64 cqe_type:4; /* W0 */
+
+ u64 tcp_err_bytes:16;
+ u64 rsvd2:8;
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:4;
+ u64 sqe_ptr:16;
+ u64 rsvd1:4;
+ u64 rsvd2:10;
+ u64 sq_qs:7;
+ u64 sq_idx:3;
+ u64 rsvd3:8;
+ u64 send_status:8;
+
+ u64 ptp_timestamp:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 send_status:8;
+ u64 rsvd3:8;
+ u64 sq_idx:3;
+ u64 sq_qs:7;
+ u64 rsvd2:10;
+ u64 rsvd1:4;
+ u64 sqe_ptr:16;
+ u64 rsvd0:4;
+ u64 cqe_type:4; /* W0 */
+
+ u64 ptp_timestamp:64; /* W1 */
+#endif
+};
+
+union cq_desc_t {
+ u64 u[64];
+ struct cqe_send_t snd_hdr;
+ struct cqe_rx_t rx_hdr;
+ struct cqe_rx_tcp_t rx_tcp_hdr;
+ struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+};
+
+struct rbdr_entry_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd0:15;
+ u64 buf_addr:42;
+ u64 cache_align:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cache_align:7;
+ u64 buf_addr:42;
+ u64 rsvd0:15;
+#endif
+};
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 tcp_pkt_cnt:12;
+ u64 rsvd1:4;
+ u64 align_hdr_bytes:4;
+ u64 align_ptr_bytes:4;
+ u64 ptr_bytes:16;
+ u64 rsvd2:24;
+ u64 cqe_type:4;
+ u64 rsvd0:54;
+ u64 tcp_end_reason:2;
+ u64 tcp_status:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_status:4;
+ u64 tcp_end_reason:2;
+ u64 rsvd0:54;
+ u64 cqe_type:4;
+ u64 rsvd2:24;
+ u64 ptr_bytes:16;
+ u64 align_ptr_bytes:4;
+ u64 align_hdr_bytes:4;
+ u64 rsvd1:4;
+ u64 tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+ u64 opaque:32;
+ u64 rss_flow:8;
+ u64 skip_length:6;
+ u64 disable_rss:1;
+ u64 disable_tcp_reassembly:1;
+ u64 nodrop:1;
+ u64 dest_alg:2;
+ u64 rsvd0:2;
+ u64 dest_rq:11;
+};
+
+enum send_l4_csum_type {
+ SEND_L4_CSUM_DISABLE = 0x00,
+ SEND_L4_CSUM_UDP = 0x01,
+ SEND_L4_CSUM_TCP = 0x02,
+ SEND_L4_CSUM_SCTP = 0x03,
+};
+
+enum send_crc_alg {
+ SEND_CRCALG_CRC32 = 0x00,
+ SEND_CRCALG_CRC32C = 0x01,
+ SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+ SEND_LD_TYPE_LDD = 0x00,
+ SEND_LD_TYPE_LDT = 0x01,
+ SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+ SEND_MEMALG_SET = 0x00,
+ SEND_MEMALG_ADD = 0x08,
+ SEND_MEMALG_SUB = 0x09,
+ SEND_MEMALG_ADDLEN = 0x0A,
+ SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+ SEND_MEMDSZ_B64 = 0x00,
+ SEND_MEMDSZ_B32 = 0x01,
+ SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+ SQ_DESC_TYPE_INVALID = 0x00,
+ SQ_DESC_TYPE_HEADER = 0x01,
+ SQ_DESC_TYPE_CRC = 0x02,
+ SQ_DESC_TYPE_IMMEDIATE = 0x03,
+ SQ_DESC_TYPE_GATHER = 0x04,
+ SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+struct sq_crc_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd1:32;
+ u64 crc_ival:32;
+ u64 subdesc_type:4;
+ u64 crc_alg:2;
+ u64 rsvd0:10;
+ u64 crc_insert_pos:16;
+ u64 hdr_start:16;
+ u64 crc_len:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 crc_len:16;
+ u64 hdr_start:16;
+ u64 crc_insert_pos:16;
+ u64 rsvd0:10;
+ u64 crc_alg:2;
+ u64 subdesc_type:4;
+ u64 crc_ival:32;
+ u64 rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 ld_type:2;
+ u64 rsvd0:42;
+ u64 size:16;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 size:16;
+ u64 rsvd0:42;
+ u64 ld_type:2;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 rsvd0:46;
+ u64 len:14;
+
+ u64 data:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 len:14;
+ u64 rsvd0:46;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 mem_alg:4;
+ u64 mem_dsz:2;
+ u64 wmem:1;
+ u64 rsvd0:21;
+ u64 offset:32;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 offset:32;
+ u64 rsvd0:21;
+ u64 wmem:1;
+ u64 mem_dsz:2;
+ u64 mem_alg:4;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4;
+ u64 tso:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 dont_send:1;
+ u64 tstmp:1;
+ u64 subdesc_cnt:8;
+ u64 csum_l4:2;
+ u64 csum_l3:1;
+ u64 rsvd0:5;
+ u64 l4_offset:8;
+ u64 l3_offset:8;
+ u64 rsvd1:4;
+ u64 tot_len:20; /* W0 */
+
+ u64 tso_sdc_cont:8;
+ u64 tso_sdc_first:8;
+ u64 tso_l4_offset:8;
+ u64 tso_flags_last:12;
+ u64 tso_flags_first:12;
+ u64 rsvd2:2;
+ u64 tso_max_paysize:14; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tot_len:20;
+ u64 rsvd1:4;
+ u64 l3_offset:8;
+ u64 l4_offset:8;
+ u64 rsvd0:5;
+ u64 csum_l3:1;
+ u64 csum_l4:2;
+ u64 subdesc_cnt:8;
+ u64 tstmp:1;
+ u64 dont_send:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 tso:1;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 tso_max_paysize:14;
+ u64 rsvd2:2;
+ u64 tso_flags_first:12;
+ u64 tso_flags_last:12;
+ u64 tso_l4_offset:8;
+ u64 tso_sdc_first:8;
+ u64 tso_sdc_cont:8; /* W1 */
+#endif
+};
+
+/* Queue config register formats */
+struct rq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63:62;
+ u64 ena:1;
+ u64 tcp_ena:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_ena:1;
+ u64 ena:1;
+ u64 reserved_2_63:62;
+#endif
+};
+
+struct cq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_43_63:21;
+ u64 ena:1;
+ u64 reset:1;
+ u64 caching:1;
+ u64 reserved_35_39:5;
+ u64 qsize:3;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_0_15:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_0_15:16;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:3;
+ u64 reserved_35_39:5;
+ u64 caching:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_43_63:21;
+#endif
+};
+
+struct sq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63:44;
+ u64 ena:1;
+ u64 reserved_18_18:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_11_15:5;
+ u64 qsize:3;
+ u64 reserved_3_7:5;
+ u64 tstmp_bgx_intf:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tstmp_bgx_intf:3;
+ u64 reserved_3_7:5;
+ u64 qsize:3;
+ u64 reserved_11_15:5;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 reserved_18_18:1;
+ u64 ena:1;
+ u64 reserved_20_63:44;
+#endif
+};
+
+struct rbdr_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_45_63:19;
+ u64 ena:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_36_41:6;
+ u64 qsize:4;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_12_15:4;
+ u64 lines:12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 lines:12;
+ u64 reserved_12_15:4;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:4;
+ u64 reserved_36_41:6;
+ u64 ldwb:1;
+ u64 reset:1;
 u64 ena:1;
+ u64 reserved_45_63:19;
+#endif
+};
+
+struct qs_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_32_63:32;
+ u64 ena:1;
+ u64 reserved_27_30:4;
+ u64 sq_ins_ena:1;
+ u64 sq_ins_pos:6;
+ u64 lock_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 send_tstmp_ena:1;
+ u64 be:1;
+ u64 reserved_7_15:9;
+ u64 vnic:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 vnic:7;
+ u64 reserved_7_15:9;
+ u64 be:1;
+ u64 send_tstmp_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 lock_ena:1;
+ u64 sq_ins_pos:6;
+ u64 sq_ins_ena:1;
+ u64 reserved_27_30:4;
+ u64 ena:1;
+ u64 reserved_32_63:32;
+#endif
+};
+
+#endif /* Q_STRUCT_H */
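As a worked illustration of the rbdr_entry_t layout above, a minimal sketch of how a driver might pack a receive buffer address into an entry; the helper name is hypothetical, but the shift matches the 7-bit cache_align field:

    #include "q_struct.h"

    /* Hypothetical helper: pack a 128-byte-aligned DMA address into an RBDR entry */
    static inline void rbdr_entry_pack(struct rbdr_entry_t *entry, u64 buf)
    {
            entry->rsvd0 = 0;
            entry->cache_align = 0;         /* buffer assumed 2^7-byte aligned */
            entry->buf_addr = buf >> 7;     /* keep bits 48:7 of the DMA address */
    }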
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
new file mode 100644
index 000000000000..633ec05dfe05
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-BGX"
+#define DRV_VERSION "1.0"
+
+struct lmac {
+ struct bgx *bgx;
+ int dmac;
+ unsigned char mac[ETH_ALEN];
+ bool link_up;
+ int lmacid; /* ID within BGX */
+ int lmacid_bd; /* ID on board */
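+ /* Embedded netdev is only handed to phylib (phy_connect_direct) for link callbacks */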
+ struct net_device netdev;
+ struct phy_device *phydev;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ unsigned int last_speed;
+ bool is_sgmii;
+ struct delayed_work dwork;
+ struct workqueue_struct *check_link;
+};
+
+struct bgx {
+ u8 bgx_id;
+ u8 qlm_mode;
+ struct lmac lmac[MAX_LMAC_PER_BGX];
+ int lmac_count;
+ int lmac_type;
+ int lane_to_sds;
+ int use_training;
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+};
+
+static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
+static int lmac_count; /* Total number of LMACs in the system */
+
+static int bgx_xaui_check_link(struct lmac *lmac);
+
+/* Supported devices */
+static const struct pci_device_id bgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, bgx_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operation which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
+{
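+ /* Per-LMAC register blocks are 1MB apart (see the "+(0..3) << 20" note in thunder_bgx.h) */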
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ return readq_relaxed(addr);
+}
+
+static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val, addr);
+}
+
+static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val | readq_relaxed(addr), addr);
+}
+
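+/* Poll a register for up to ~100-200ms (100 iterations, 1-2ms apart) until the
+ * masked bits clear (zero == true) or become set (zero == false).
+ * Returns 0 on success, 1 on timeout.
+ */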
+static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
+{
+ int timeout = 100;
+ u64 reg_val;
+
+ while (timeout) {
+ reg_val = bgx_reg_read(bgx, lmac, reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ return 1;
+}
+
+/* Return a bitmap of BGX blocks present in HW, one bit per BGX */
+unsigned bgx_get_map(int node)
+{
+ int i;
+ unsigned map = 0;
+
+ for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
+ if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
+ map |= (1 << i);
+ }
+
+ return map;
+}
+EXPORT_SYMBOL(bgx_get_map);
+
+/* Return number of LMACs configured for this BGX */
+int bgx_get_lmac_count(int node, int bgx_idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (bgx)
+ return bgx->lmac_count;
+
+ return 0;
+}
+EXPORT_SYMBOL(bgx_get_lmac_count);
+
+/* Returns the current link status of LMAC */
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
+{
+ struct bgx_link_status *link = (struct bgx_link_status *)status;
+ struct bgx *bgx;
+ struct lmac *lmac;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+ link->link_up = lmac->link_up;
+ link->duplex = lmac->last_duplex;
+ link->speed = lmac->last_speed;
+}
+EXPORT_SYMBOL(bgx_get_lmac_link_state);
+
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+ if (bgx)
+ return bgx->lmac[lmacid].mac;
+
+ return NULL;
+}
+EXPORT_SYMBOL(bgx_get_lmac_mac);
+
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+ if (!bgx)
+ return;
+
+ ether_addr_copy(bgx->lmac[lmacid].mac, mac);
+}
+EXPORT_SYMBOL(bgx_set_lmac_mac);
+
+static void bgx_sgmii_change_link_state(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ u64 cmr_cfg;
+ u64 port_cfg = 0;
+ u64 misc_ctl = 0;
+
+ cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
+ cmr_cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+ port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+ misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
+
+ if (lmac->link_up) {
+ misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
+ port_cfg &= ~GMI_PORT_CFG_DUPLEX;
+ port_cfg |= (lmac->last_duplex << 2);
+ } else {
+ misc_ctl |= PCS_MISC_CTL_GMX_ENO;
+ }
+
+ switch (lmac->last_speed) {
+ case 10:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 50; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 100:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 5; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 1000:
+ port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 1; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
+ if (lmac->last_duplex)
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 0);
+ else
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 8192);
+ break;
+ default:
+ break;
+ }
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
+
+ port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+
+ /* Re-enable lmac */
+ cmr_cfg |= CMR_EN;
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+}
+
+static void bgx_lmac_handler(struct net_device *netdev)
+{
+ struct lmac *lmac = container_of(netdev, struct lmac, netdev);
+ struct phy_device *phydev;
+ int link_changed = 0;
+
+ if (!lmac)
+ return;
+
+ phydev = lmac->phydev;
+
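+ /* link_changed: 0 = no change, 1 = link up or parameters changed, -1 = link went down */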
+ if (!phydev->link && lmac->last_link)
+ link_changed = -1;
+
+ if (phydev->link &&
+ (lmac->last_duplex != phydev->duplex ||
+ lmac->last_link != phydev->link ||
+ lmac->last_speed != phydev->speed)) {
+ link_changed = 1;
+ }
+
+ lmac->last_link = phydev->link;
+ lmac->last_speed = phydev->speed;
+ lmac->last_duplex = phydev->duplex;
+
+ if (!link_changed)
+ return;
+
+ if (link_changed > 0)
+ lmac->link_up = true;
+ else
+ lmac->link_up = false;
+
+ if (lmac->is_sgmii)
+ bgx_sgmii_change_link_state(lmac);
+ else
+ bgx_xaui_check_link(lmac);
+}
+
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return 0;
+
+ if (idx > 8)
+ lmac = 0;
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_rx_stats);
+
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return 0;
+
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_tx_stats);
+
+static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
+{
+ u64 offset;
+
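+ /* Each LMAC owns MAX_DMAC_PER_LMAC consecutive 8-byte CAM entries; clear them from the last used slot down */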
+ while (bgx->lmac[lmac].dmac > 0) {
+ offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
+ (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
+ bgx->lmac[lmac].dmac--;
+ }
+}
+
+static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+{
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
+
+ /* Disable frame alignment if using preamble */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ if (cfg & 1)
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ /* PCS reset */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
+ return -1;
+ }
+
+ /* Take PCS out of power-down; reset and enable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+ cfg &= ~PCS_MRX_CTL_PWR_DN;
+ cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+{
+ u64 cfg;
+
+ /* Reset SPU */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ /* Disable SPU RX; RXAUI additionally needs interleaved running disparity */
+ if (bgx->lmac_type != BGX_MODE_RXAUI)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+ else
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
+ SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+
+ /* clear all interrupts */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+ if (bgx->use_training) {
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
+ /* training enable */
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
+ }
+
+ /* Append FCS to each packet */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
+
+ /* Disable forward error correction */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
+ cfg &= ~SPU_FEC_CTL_FEC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
+
+ /* Disable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+ cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
+ if (bgx->lmac_type == BGX_MODE_10G_KR)
+ cfg |= (1 << 23);
+ else if (bgx->lmac_type == BGX_MODE_40G_KR)
+ cfg |= (1 << 24);
+ else
+ cfg &= ~((1 << 23) | (1 << 24));
+ cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
+
+ cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
+ cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+ bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
+ cfg &= ~SPU_CTL_LOW_POWER;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
+ cfg &= ~SMU_TX_CTL_UNI_EN;
+ cfg |= SMU_TX_CTL_DIC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+
+ /* take lmac_count into account */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
+
+ return 0;
+}
+
+static int bgx_xaui_check_link(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ int lmacid = lmac->lmacid;
+ int lmac_type = bgx->lmac_type;
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+ if (bgx->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+
+ /* Wait for SPU to come out of reset */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
+ (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
+ SPU_BR_STATUS_BLK_LOCK, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BR_STATUS_BLK_LOCK not completed\n");
+ return -1;
+ }
+ } else {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
+ SPU_BX_STATUS_RX_ALIGN, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BX_STATUS_RX_ALIGN not completed\n");
+ return -1;
+ }
+ }
+
+ /* Clear rcvflt bit (latching high) and read it back */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
+ if (bgx->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+ return -1;
+ }
+
+ /* Wait for MAC RX to be ready */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
+ SMU_RX_CTL_STATUS, true)) {
+ dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
+ return -1;
+ }
+
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
+ return -1;
+ }
+
+ /* Wait for BGX TX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
+ return -1;
+ }
+
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault\n");
+ return -1;
+ }
+
+ /* Receive link is latching low. Force it high and verify it */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false)) {
+ dev_err(&bgx->pdev->dev, "SPU receive link down\n");
+ return -1;
+ }
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+ return 0;
+}
+
+static void bgx_poll_for_link(struct work_struct *work)
+{
+ struct lmac *lmac;
+ u64 link;
+
+ lmac = container_of(work, struct lmac, dwork.work);
+
+ /* Receive link is latching low. Force it high and verify it */
+ bgx_reg_modify(lmac->bgx, lmac->lmacid,
+ BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+ bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false);
+
+ link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ if (link & SPU_STATUS1_RCV_LNK) {
+ lmac->link_up = 1;
+ if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+ lmac->last_speed = 40000;
+ else
+ lmac->last_speed = 10000;
+ lmac->last_duplex = 1;
+ } else {
+ lmac->link_up = 0;
+ }
+
+ if (lmac->last_link != lmac->link_up) {
+ lmac->last_link = lmac->link_up;
+ if (lmac->link_up)
+ bgx_xaui_check_link(lmac);
+ }
+
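+ /* Re-check link state every 2 seconds */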
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
+}
+
+static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ lmac->bgx = bgx;
+
+ if (bgx->lmac_type == BGX_MODE_SGMII) {
+ lmac->is_sgmii = 1;
+ if (bgx_lmac_sgmii_init(bgx, lmacid))
+ return -1;
+ } else {
+ lmac->is_sgmii = 0;
+ if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
+ return -1;
+ }
+
+ if (lmac->is_sgmii) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
+ } else {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
+ }
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
+ CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+ /* Restore default cfg, in case low level firmware changed it */
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
+
+ if ((bgx->lmac_type != BGX_MODE_XFI) &&
+ (bgx->lmac_type != BGX_MODE_XLAUI) &&
+ (bgx->lmac_type != BGX_MODE_40G_KR) &&
+ (bgx->lmac_type != BGX_MODE_10G_KR)) {
+ if (!lmac->phydev)
+ return -ENODEV;
+
+ lmac->phydev->dev_flags = 0;
+
+ if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+ bgx_lmac_handler,
+ PHY_INTERFACE_MODE_SGMII))
+ return -ENODEV;
+
+ phy_start_aneg(lmac->phydev);
+ } else {
+ lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!lmac->check_link)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+ queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+ }
+
+ return 0;
+}
+
+static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cmrx_cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->check_link) {
+ /* Destroy work queue */
+ cancel_delayed_work(&lmac->dwork);
+ flush_workqueue(lmac->check_link);
+ destroy_workqueue(lmac->check_link);
+ }
+
+ cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cmrx_cfg &= ~(1 << 15);
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ bgx_flush_dmac_addrs(bgx, lmacid);
+
+ if (lmac->phydev)
+ phy_disconnect(lmac->phydev);
+
+ lmac->phydev = NULL;
+}
+
+static void bgx_set_num_ports(struct bgx *bgx)
+{
+ u64 lmac_count;
+
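+ /* lane_to_sds holds a 2-bit serdes index per lane; 0xE4 (11 10 01 00b) is the identity lane-to-serdes mapping */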
+ switch (bgx->qlm_mode) {
+ case QLM_MODE_SGMII:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_SGMII;
+ bgx->lane_to_sds = 0;
+ break;
+ case QLM_MODE_XAUI_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_XAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_RXAUI_2X2:
+ bgx->lmac_count = 2;
+ bgx->lmac_type = BGX_MODE_RXAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_XFI_4X1:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_XFI;
+ bgx->lane_to_sds = 0;
+ break;
+ case QLM_MODE_XLAUI_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_XLAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_10G_KR_4X1:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_10G_KR;
+ bgx->lane_to_sds = 0;
+ bgx->use_training = 1;
+ break;
+ case QLM_MODE_40G_KR4_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_40G_KR;
+ bgx->lane_to_sds = 0xE4;
+ bgx->use_training = 1;
+ break;
+ default:
+ bgx->lmac_count = 0;
+ break;
+ }
+
+ /* If low level firmware has programmed an LMAC count based on the
+ * board type, use it; otherwise keep the default static values
+ * set above.
+ */
+ lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+ if (lmac_count != 4)
+ bgx->lmac_count = lmac_count;
+}
+
+static void bgx_init_hw(struct bgx *bgx)
+{
+ int i;
+
+ bgx_set_num_ports(bgx);
+
+ bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
+ if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
+ dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
+
+ /* Set lmac type and lane2serdes mapping */
+ for (i = 0; i < bgx->lmac_count; i++) {
+ if (bgx->lmac_type == BGX_MODE_RXAUI) {
+ if (i)
+ bgx->lane_to_sds = 0x0e;
+ else
+ bgx->lane_to_sds = 0x04;
+ bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+ (bgx->lmac_type << 8) | bgx->lane_to_sds);
+ continue;
+ }
+ bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+ (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
+ bgx->lmac[i].lmacid_bd = lmac_count;
+ lmac_count++;
+ }
+
+ bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
+
+ /* Set the backpressure AND mask */
+ for (i = 0; i < bgx->lmac_count; i++)
+ bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
+ ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
+ (i * MAX_BGX_CHANS_PER_LMAC));
+
+ /* Disable all MAC filtering */
+ for (i = 0; i < RX_DMAC_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
+
+ /* Disable MAC steering (NCSI traffic) */
+ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+ struct device *dev = &bgx->pdev->dev;
+ int lmac_type;
+ int train_en;
+
+ /* Read LMAC0 type to figure out QLM mode
+ * This is configured by low level firmware
+ */
+ lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+ lmac_type = (lmac_type >> 8) & 0x07;
+
+ train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
+ SPU_PMD_CRTL_TRAIN_EN;
+
+ switch (lmac_type) {
+ case BGX_MODE_SGMII:
+ bgx->qlm_mode = QLM_MODE_SGMII;
+ dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_XAUI:
+ bgx->qlm_mode = QLM_MODE_XAUI_1X4;
+ dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_RXAUI:
+ bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
+ dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_XFI:
+ if (!train_en) {
+ bgx->qlm_mode = QLM_MODE_XFI_4X1;
+ dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
+ } else {
+ bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
+ dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
+ }
+ break;
+ case BGX_MODE_XLAUI:
+ if (!train_en) {
+ bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
+ dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
+ } else {
+ bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
+ dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
+ }
+ break;
+ default:
+ bgx->qlm_mode = QLM_MODE_SGMII;
+ dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
+ }
+}
+
+static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+{
+ struct device_node *np_child;
+ u8 lmac = 0;
+
+ for_each_child_of_node(np, np_child) {
+ struct device_node *phy_np;
+ const char *mac;
+
+ phy_np = of_parse_phandle(np_child, "phy-handle", 0);
+ if (phy_np)
+ bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+
+ mac = of_get_mac_address(np_child);
+ if (mac)
+ ether_addr_copy(bgx->lmac[lmac].mac, mac);
+
+ SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+ bgx->lmac[lmac].lmacid = lmac;
+ lmac++;
+ if (lmac == MAX_LMAC_PER_BGX)
+ break;
+ }
+}
+
+static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct bgx *bgx = NULL;
+ struct device_node *np;
+ char bgx_sel[5];
+ u8 lmac;
+
+ bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
+ if (!bgx)
+ return -ENOMEM;
+ bgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, bgx);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* Map configuration registers */
+ bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!bgx->reg_base) {
+ dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+ bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;
+
+ bgx_vnic[bgx->bgx_id] = bgx;
+ bgx_get_qlm_mode(bgx);
+
+ snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+ np = of_find_node_by_name(NULL, bgx_sel);
+ if (np)
+ bgx_init_of(bgx, np);
+
+ bgx_init_hw(bgx);
+
+ /* Enable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ err = bgx_lmac_enable(bgx, lmac);
+ if (err) {
+ dev_err(dev, "BGX%d failed to enable lmac%d\n",
+ bgx->bgx_id, lmac);
+ goto err_enable;
+ }
+ }
+
+ return 0;
+
+err_enable:
+ bgx_vnic[bgx->bgx_id] = NULL;
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void bgx_remove(struct pci_dev *pdev)
+{
+ struct bgx *bgx = pci_get_drvdata(pdev);
+ u8 lmac;
+
+ /* Disable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ bgx_lmac_disable(bgx, lmac);
+
+ bgx_vnic[bgx->bgx_id] = NULL;
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver bgx_driver = {
+ .name = DRV_NAME,
+ .id_table = bgx_id_table,
+ .probe = bgx_probe,
+ .remove = bgx_remove,
+};
+
+static int __init bgx_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&bgx_driver);
+}
+
+static void __exit bgx_cleanup_module(void)
+{
+ pci_unregister_driver(&bgx_driver);
+}
+
+module_init(bgx_init_module);
+module_exit(bgx_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
new file mode 100644
index 000000000000..ba4f53b7cc2c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef THUNDER_BGX_H
+#define THUNDER_BGX_H
+
+#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
+#define MAX_BGX_PER_CN88XX 2
+#define MAX_LMAC_PER_BGX 4
+#define MAX_BGX_CHANS_PER_LMAC 16
+#define MAX_DMAC_PER_LMAC 8
+#define MAX_FRAME_SIZE 9216
+
+#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
+
+#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
+
+/* Registers */
+#define BGX_CMRX_CFG 0x00
+#define CMR_PKT_TX_EN BIT_ULL(13)
+#define CMR_PKT_RX_EN BIT_ULL(14)
+#define CMR_EN BIT_ULL(15)
+#define BGX_CMR_GLOBAL_CFG 0x08
+#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
+#define BGX_CMRX_RX_ID_MAP 0x60
+#define BGX_CMRX_RX_STAT0 0x70
+#define BGX_CMRX_RX_STAT1 0x78
+#define BGX_CMRX_RX_STAT2 0x80
+#define BGX_CMRX_RX_STAT3 0x88
+#define BGX_CMRX_RX_STAT4 0x90
+#define BGX_CMRX_RX_STAT5 0x98
+#define BGX_CMRX_RX_STAT6 0xA0
+#define BGX_CMRX_RX_STAT7 0xA8
+#define BGX_CMRX_RX_STAT8 0xB0
+#define BGX_CMRX_RX_STAT9 0xB8
+#define BGX_CMRX_RX_STAT10 0xC0
+#define BGX_CMRX_RX_BP_DROP 0xC8
+#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMR_RX_DMACX_CAM 0x200
+#define RX_DMACX_CAM_EN BIT_ULL(48)
+#define RX_DMACX_CAM_LMACID(x) (x << 49)
+#define RX_DMAC_COUNT 32
+#define BGX_CMR_RX_STREERING 0x300
+#define RX_TRAFFIC_STEER_RULE_COUNT 8
+#define BGX_CMR_CHAN_MSK_AND 0x450
+#define BGX_CMR_BIST_STATUS 0x460
+#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_STAT0 0x600
+#define BGX_CMRX_TX_STAT1 0x608
+#define BGX_CMRX_TX_STAT2 0x610
+#define BGX_CMRX_TX_STAT3 0x618
+#define BGX_CMRX_TX_STAT4 0x620
+#define BGX_CMRX_TX_STAT5 0x628
+#define BGX_CMRX_TX_STAT6 0x630
+#define BGX_CMRX_TX_STAT7 0x638
+#define BGX_CMRX_TX_STAT8 0x640
+#define BGX_CMRX_TX_STAT9 0x648
+#define BGX_CMRX_TX_STAT10 0x650
+#define BGX_CMRX_TX_STAT11 0x658
+#define BGX_CMRX_TX_STAT12 0x660
+#define BGX_CMRX_TX_STAT13 0x668
+#define BGX_CMRX_TX_STAT14 0x670
+#define BGX_CMRX_TX_STAT15 0x678
+#define BGX_CMRX_TX_STAT16 0x680
+#define BGX_CMRX_TX_STAT17 0x688
+#define BGX_CMR_TX_LMACS 0x1000
+
+#define BGX_SPUX_CONTROL1 0x10000
+#define SPU_CTL_LOW_POWER BIT_ULL(11)
+#define SPU_CTL_RESET BIT_ULL(15)
+#define BGX_SPUX_STATUS1 0x10008
+#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
+#define BGX_SPUX_STATUS2 0x10020
+#define SPU_STATUS2_RCVFLT BIT_ULL(10)
+#define BGX_SPUX_BX_STATUS 0x10028
+#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
+#define BGX_SPUX_BR_STATUS1 0x10030
+#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
+#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
+#define BGX_SPUX_BR_PMD_CRTL 0x10068
+#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
+#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
+#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
+#define BGX_SPUX_BR_PMD_LD_REP 0x10090
+#define BGX_SPUX_FEC_CONTROL 0x100A0
+#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
+#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
+#define BGX_SPUX_AN_CONTROL 0x100C8
+#define SPU_AN_CTL_AN_EN BIT_ULL(12)
+#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
+#define BGX_SPUX_AN_ADV 0x100D8
+#define BGX_SPUX_MISC_CONTROL 0x10218
+#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
+#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
+#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
+#define BGX_SPUX_INT_W1S 0x10228
+#define BGX_SPUX_INT_ENA_W1C 0x10230
+#define BGX_SPUX_INT_ENA_W1S 0x10238
+#define BGX_SPU_DBG_CONTROL 0x10300
+#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
+#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
+
+#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_JABBER 0x20030
+#define BGX_SMUX_RX_CTL 0x20048
+#define SMU_RX_CTL_STATUS (3ull << 0)
+#define BGX_SMUX_TX_APPEND 0x20100
+#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
+#define BGX_SMUX_TX_MIN_PKT 0x20118
+#define BGX_SMUX_TX_INT 0x20140
+#define BGX_SMUX_TX_CTL 0x20178
+#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
+#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
+#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
+#define BGX_SMUX_TX_THRESH 0x20180
+#define BGX_SMUX_CTL 0x20200
+#define SMU_CTL_RX_IDLE BIT_ULL(0)
+#define SMU_CTL_TX_IDLE BIT_ULL(1)
+
+#define BGX_GMP_PCS_MRX_CTL 0x30000
+#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
+#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
+#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
+#define PCS_MRX_CTL_RESET BIT_ULL(15)
+#define BGX_GMP_PCS_MRX_STATUS 0x30008
+#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
+#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
+#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
+#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
+#define BGX_GMP_GMI_PRTX_CFG 0x38020
+#define GMI_PORT_CFG_SPEED BIT_ULL(1)
+#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
+#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
+#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
+#define BGX_GMP_GMI_RXX_JABBER 0x38038
+#define BGX_GMP_GMI_TXX_THRESH 0x38210
+#define BGX_GMP_GMI_TXX_APPEND 0x38218
+#define BGX_GMP_GMI_TXX_SLOT 0x38220
+#define BGX_GMP_GMI_TXX_BURST 0x38228
+#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
+#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
+
+#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
+#define BGX_MSIX_VEC_0_29_CTL 0x400008
+#define BGX_MSIX_PBA_0 0x4F0000
+
+/* MSI-X interrupts */
+#define BGX_MSIX_VECTORS 30
+#define BGX_LMAC_VEC_OFFSET 7
+#define BGX_MSIX_VEC_SHIFT 4
+
+#define CMRX_INT 0
+#define SPUX_INT 1
+#define SMUX_RX_INT 2
+#define SMUX_TX_INT 3
+#define GMPX_PCS_INT 4
+#define GMPX_GMI_RX_INT 5
+#define GMPX_GMI_TX_INT 6
+#define CMR_MEM_INT 28
+#define SPU_MEM_INT 29
+
+#define LMAC_INTR_LINK_UP BIT(0)
+#define LMAC_INTR_LINK_DOWN BIT(1)
+
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+ MCAST_MODE_REJECT,
+ MCAST_MODE_ACCEPT,
+ MCAST_MODE_CAM_FILTER,
+ RSVD
+};
+
+#define BCAST_ACCEPT 1
+#define CAM_ACCEPT 1
+
+void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
+unsigned bgx_get_map(int node);
+int bgx_get_lmac_count(int node, int bgx);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
+#define BGX_RX_STATS_COUNT 11
+#define BGX_TX_STATS_COUNT 18
+
+struct bgx_stats {
+ u64 rx_stats[BGX_RX_STATS_COUNT];
+ u64 tx_stats[BGX_TX_STATS_COUNT];
+};
+
+enum LMAC_TYPE {
+ BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
+ BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */
+ BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
+ BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
+ BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
+ BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
+};
+
+enum qlm_mode {
+ QLM_MODE_SGMII, /* SGMII, each lane independent */
+ QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */
+ QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */
+ QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */
+ QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */
+ QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */
+ QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */
+};
+
+#endif /* THUNDER_BGX_H */
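To show how these exported interfaces fit together, a minimal sketch of a consumer (such as the NIC PF driver) walking the BGX topology of one node; the function name is illustrative only, and struct bgx_link_status is the status type that bgx_get_lmac_link_state() fills in, declared in the driver's shared headers:

    #include <linux/kernel.h>
    #include "nic.h"
    #include "thunder_bgx.h"

    /* Illustrative only: log every LMAC behind every BGX block on @node */
    static void example_walk_bgx(int node)
    {
            unsigned int bgx_map = bgx_get_map(node);  /* one bit per BGX block */
            int bgx_idx, lmac;

            for (bgx_idx = 0; bgx_idx < MAX_BGX_PER_CN88XX; bgx_idx++) {
                    if (!(bgx_map & BIT(bgx_idx)))
                            continue;
                    for (lmac = 0; lmac < bgx_get_lmac_count(node, bgx_idx); lmac++) {
                            struct bgx_link_status link;

                            bgx_get_lmac_link_state(node, bgx_idx, lmac, &link);
                            pr_info("bgx%d lmac%d %pM link %s\n", bgx_idx, lmac,
                                    bgx_get_lmac_mac(node, bgx_idx, lmac),
                                    link.link_up ? "up" : "down");
                    }
            }
    }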
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 524d11098c56..4d627a8f04b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -46,17 +46,19 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
enum {
- MAX_NPORTS = 4, /* max # of ports */
- SERNUM_LEN = 24, /* Serial # length */
- EC_LEN = 16, /* E/C length */
- ID_LEN = 16, /* ID length */
- PN_LEN = 16, /* Part Number length */
+ MAX_NPORTS = 4, /* max # of ports */
+ SERNUM_LEN = 24, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+ PN_LEN = 16, /* Part Number length */
+ MACADDR_LEN = 12, /* MAC Address length */
};
enum {
@@ -198,23 +200,45 @@ struct lb_port_stats {
};
struct tp_tcp_stats {
- u32 tcpOutRsts;
- u64 tcpInSegs;
- u64 tcpOutSegs;
- u64 tcpRetransSegs;
+ u32 tcp_out_rsts;
+ u64 tcp_in_segs;
+ u64 tcp_out_segs;
+ u64 tcp_retrans_segs;
+};
+
+struct tp_usm_stats {
+ u32 frames;
+ u32 drops;
+ u64 octets;
+};
+
+struct tp_fcoe_stats {
+ u32 frames_ddp;
+ u32 frames_drop;
+ u64 octets_ddp;
};
struct tp_err_stats {
- u32 macInErrs[4];
- u32 hdrInErrs[4];
- u32 tcpInErrs[4];
- u32 tnlCongDrops[4];
- u32 ofldChanDrops[4];
- u32 tnlTxDrops[4];
- u32 ofldVlanDrops[4];
- u32 tcp6InErrs[4];
- u32 ofldNoNeigh;
- u32 ofldCongDefer;
+ u32 mac_in_errs[4];
+ u32 hdr_in_errs[4];
+ u32 tcp_in_errs[4];
+ u32 tnl_cong_drops[4];
+ u32 ofld_chan_drops[4];
+ u32 tnl_tx_drops[4];
+ u32 ofld_vlan_drops[4];
+ u32 tcp6_in_errs[4];
+ u32 ofld_no_neigh;
+ u32 ofld_cong_defer;
+};
+
+struct tp_cpl_stats {
+ u32 req[4];
+ u32 rsp[4];
+};
+
+struct tp_rdma_stats {
+ u32 rqe_dfr_pkt;
+ u32 rqe_dfr_mod;
};
struct sge_params {
@@ -224,7 +248,6 @@ struct sge_params {
};
struct tp_params {
- unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
unsigned int la_mask; /* what events are recorded by TP LA */
unsigned short tx_modq_map; /* TX modulation scheduler queue to */
@@ -259,6 +282,7 @@ struct vpd_params {
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
u8 pn[PN_LEN + 1];
+ u8 na[MACADDR_LEN + 1];
};
struct pci_params {
@@ -273,6 +297,7 @@ struct pci_params {
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -284,6 +309,10 @@ enum chip_type {
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
};
struct devlog_params {
@@ -292,6 +321,15 @@ struct devlog_params {
u32 size; /* size of log */
};
+/* Stores chip specific parameters */
+struct arch_specific_params {
+ u8 nchan;
+ u16 mps_rplc_size;
+ u16 vfcount;
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
@@ -317,6 +355,7 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
enum chip_type chip; /* chip code */
+ struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
unsigned char bypass;
@@ -328,6 +367,17 @@ struct adapter_params {
unsigned int max_ird_adapter; /* Max read depth per adapter */
};
+/* State needed to monitor the forward progress of SGE Ingress DMA activities
+ * and possible hangs.
+ */
+struct sge_idma_monitor_state {
+ unsigned int idma_1s_thresh; /* 1s threshold in Core Clock ticks */
+ unsigned int idma_stalled[2]; /* synthesized stalled timers in HZ */
+ unsigned int idma_state[2]; /* IDMA Hang detect state */
+ unsigned int idma_qid[2]; /* IDMA Hung Ingress Queue ID */
+ unsigned int idma_warn[2]; /* time to warning in HZ */
+};
+
#include "t4fw_api.h"
#define FW_VERSION(chip) ( \
@@ -421,6 +471,7 @@ struct port_info {
u8 rss_mode;
struct link_config link_cfg;
u16 *rss;
+ struct port_stats stats_base;
#ifdef CONFIG_CHELSIO_T4_DCB
struct port_dcb_info dcb; /* Data Center Bridging support */
#endif
@@ -630,12 +681,7 @@ struct sge {
u32 fl_align; /* response queue message alignment */
u32 fl_starve_thres; /* Free List starvation threshold */
- /* State variables for detecting an SGE Ingress DMA hang */
- unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
- unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
- unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
- unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
-
+ struct sge_idma_monitor_state idma_monitor;
unsigned int egr_start;
unsigned int egr_sz;
unsigned int ingr_start;
@@ -644,6 +690,7 @@ struct sge {
struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
unsigned long *starving_fl;
unsigned long *txq_maperr;
+ unsigned long *blocked_fl;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
};
@@ -665,6 +712,12 @@ struct l2t_data;
#endif
+struct doorbell_stats {
+ u32 db_drop;
+ u32 db_empty;
+ u32 db_full;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -672,7 +725,7 @@ struct adapter {
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned int mbox;
- unsigned int fn;
+ unsigned int pf;
unsigned int flags;
enum chip_type chip;
@@ -682,13 +735,12 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
- unsigned int wol;
-
struct {
unsigned short vec;
char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1];
+ struct doorbell_stats db_stats;
struct sge sge;
struct net_device *port[MAX_NPORTS];
@@ -843,6 +895,16 @@ enum {
VLAN_REWRITE
};
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
+static inline int is_t6(enum chip_type chip)
+{
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6;
+}
+
static inline int is_t5(enum chip_type chip)
{
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
@@ -887,6 +949,22 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
}
/**
+ * t4_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
+ u8 hw_addr[])
+{
+ ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+ ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
+}
+
+/**
* netdev2pinfo - return the port_info structure associated with a net_device
* @dev: the netdev
*
@@ -1055,7 +1133,7 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd);
+ struct sge_fl *fl, rspq_handler_t hnd, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
unsigned int iqid);
@@ -1095,6 +1173,19 @@ static inline int is_bypass_device(int device)
}
}
+static inline int is_10gbt_device(int device)
+{
+ /* this should be set based upon device capabilities */
+ switch (device) {
+ case 0x4409:
+ case 0x4486:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;
@@ -1117,9 +1208,19 @@ static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
u32 val);
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout);
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok);
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+ const void *cmd, int size, void *rpl,
+ int timeout)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+ timeout);
+}
+
static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
int size, void *rpl)
{
@@ -1147,10 +1248,14 @@ void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(void __iomem *regs);
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
+u32 t4_get_util_window(struct adapter *adap);
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
+
#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ 1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
@@ -1165,10 +1270,16 @@ unsigned int t4_get_regs_len(struct adapter *adapter);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_load_phy_fw(struct adapter *adap,
+ int win, spinlock_t *lock,
+ int (*phy_fw_version)(const u8 *, size_t),
+ const u8 *phy_fw_data, size_t phy_fw_size);
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const u8 *fw_data, unsigned int size, int force);
@@ -1182,7 +1293,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
int t4_prep_adapter(struct adapter *adapter);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
enum t4_bar2_qtype qtype,
u64 *pbar2_qoffset,
@@ -1195,12 +1306,15 @@ int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
+int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq);
int t4_read_rss(struct adapter *adapter, u16 *entries);
void t4_read_rss_key(struct adapter *adapter, u32 *key);
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
@@ -1211,10 +1325,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
u32 t4_read_rss_pf_map(struct adapter *adapter);
u32 t4_read_rss_pf_mask(struct adapter *adapter);
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
- u64 *parity);
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
- u64 *parity);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
@@ -1229,13 +1340,23 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
@@ -1259,13 +1380,16 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val, int rw);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ const u32 *val, int timeout);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
const u32 *val);
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
- unsigned int pf, unsigned int vf,
- unsigned int nparams, const u32 *params,
- const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
unsigned int rxqi, unsigned int rxq, unsigned int tc,
@@ -1274,6 +1398,9 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
@@ -1303,6 +1430,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
@@ -1310,4 +1438,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_free_mem(void *addr);
+void t4_idma_monitor_init(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma);
+void t4_idma_monitor(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma,
+ int hz, int ticks);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 371f75e782e5..3719807efddd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -1084,41 +1084,89 @@ static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
static int mps_tcam_show(struct seq_file *seq, void *v)
{
- if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF"
- " VF Replication "
- "P0 P1 P2 P3 ML\n");
- else {
+ struct adapter *adap = seq->private;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ if (v == SEQ_START_TOKEN) {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF "
+ "Replication "
+ " P0 P1 P2 P3 ML\n");
+ else
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF Replication"
+ " P0 P1 P2 P3 ML\n");
+ } else {
u64 mask;
u8 addr[ETH_ALEN];
- struct adapter *adap = seq->private;
+ bool replicate;
unsigned int idx = (uintptr_t)v - 2;
- u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
- u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
- u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
- u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
- u32 rplc[4] = {0, 0, 0, 0};
+ u64 tcamy, tcamx, val;
+ u32 cls_lo, cls_hi, ctl;
+ u32 rplc[8] = {0};
+
+ if (chip_ver > CHELSIO_T5) {
+ /* CtlCmdType - 0: Read, 1: Write
+ * CtlTcamSel - 0: TCAM0, 1: TCAM1
+ * CtlXYBitSel- 0: Y bit, 1: X bit
+ */
+
+ /* Read tcamy */
+ ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
+ if (idx < 256)
+ ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
+ else
+ ctl |= CTLTCAMINDEX_V(idx - 256) |
+ CTLTCAMSEL_V(1);
+ t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+ tcamy = DMACH_G(val) << 32;
+ tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+
+ /* Read tcamx. Change the control param */
+ ctl |= CTLXYBITSEL_V(1);
+ t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+ tcamx = DMACH_G(val) << 32;
+ tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+ } else {
+ tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+ tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+ }
+
+ cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+ cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
if (tcamx & tcamy) {
seq_printf(seq, "%3u -\n", idx);
goto out;
}
- if (cls_lo & REPLICATE_F) {
+ rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0;
+ if (chip_ver > CHELSIO_T5)
+ replicate = (cls_lo & T6_REPLICATE_F);
+ else
+ replicate = (cls_lo & REPLICATE_F);
+
+ if (replicate) {
struct fw_ldst_cmd ldst_cmd;
int ret;
+ struct fw_ldst_mps_rplc mps_rplc;
+ u32 ldst_addrspc;
memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_addrspc =
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS);
ldst_cmd.op_to_addrspace =
htonl(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_READ_F |
- FW_LDST_CMD_ADDRSPACE_V(
- FW_LDST_ADDRSPC_MPS));
+ ldst_addrspc);
ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.mps.fid_ctl =
+ ldst_cmd.u.mps.rplc.fid_idx =
htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
- FW_LDST_CMD_CTL_V(idx));
+ FW_LDST_CMD_IDX_V(idx));
ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
sizeof(ldst_cmd), &ldst_cmd);
if (ret)
@@ -1126,30 +1174,69 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
"replication map for idx %d: %d\n",
idx, -ret);
else {
- rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
- rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
- rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
- rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+ mps_rplc = ldst_cmd.u.mps.rplc;
+ rplc[0] = ntohl(mps_rplc.rplc31_0);
+ rplc[1] = ntohl(mps_rplc.rplc63_32);
+ rplc[2] = ntohl(mps_rplc.rplc95_64);
+ rplc[3] = ntohl(mps_rplc.rplc127_96);
+ if (adap->params.arch.mps_rplc_size > 128) {
+ rplc[4] = ntohl(mps_rplc.rplc159_128);
+ rplc[5] = ntohl(mps_rplc.rplc191_160);
+ rplc[6] = ntohl(mps_rplc.rplc223_192);
+ rplc[7] = ntohl(mps_rplc.rplc255_224);
+ }
}
}
tcamxy2valmask(tcamx, tcamy, addr, &mask);
- seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
- "%3c %#x%4u%4d",
- idx, addr[0], addr[1], addr[2], addr[3], addr[4],
- addr[5], (unsigned long long)mask,
- (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
- PF_G(cls_lo),
- (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
- if (cls_lo & REPLICATE_F)
- seq_printf(seq, " %08x %08x %08x %08x",
- rplc[3], rplc[2], rplc[1], rplc[0]);
+ if (chip_ver > CHELSIO_T5)
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], (unsigned long long)mask,
+ (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ T6_PF_G(cls_lo),
+ (cls_lo & T6_VF_VALID_F) ?
+ T6_VF_G(cls_lo) : -1);
else
- seq_printf(seq, "%36c", ' ');
- seq_printf(seq, "%4u%3u%3u%3u %#x\n",
- SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
- SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
- (cls_lo >> MULTILISTEN0_S) & 0xf);
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], (unsigned long long)mask,
+ (cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ PF_G(cls_lo),
+ (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+
+ if (replicate) {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_printf(seq, " %08x %08x %08x %08x "
+ "%08x %08x %08x %08x",
+ rplc[7], rplc[6], rplc[5], rplc[4],
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ else
+ seq_printf(seq, " %08x %08x %08x %08x",
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ } else {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_printf(seq, "%72c", ' ');
+ else
+ seq_printf(seq, "%36c", ' ');
+ }
+
+ if (chip_ver > CHELSIO_T5)
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ T6_SRAM_PRIO0_G(cls_lo),
+ T6_SRAM_PRIO1_G(cls_lo),
+ T6_SRAM_PRIO2_G(cls_lo),
+ T6_SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> T6_MULTILISTEN0_S) & 0xf);
+ else
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+ SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> MULTILISTEN0_S) & 0xf);
}
out: return 0;
}
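The T6 path in mps_tcam_show() above replaces the flat MPS_CLS_TCAM_{X,Y}_L reads with an indirect window. A minimal sketch of one such read, factored into a helper for clarity -- the register names and field macros are the ones used in the hunk, but the helper itself is illustrative, not driver code:

/* Hedged sketch: one indirect T6 TCAM read, as done inline above.
 * xbit = 0 fetches the Y half, xbit = 1 the X half; entries 0..255
 * live in TCAM0, higher indices in TCAM1.
 */
static u64 t6_mps_tcam_read(struct adapter *adap, unsigned int idx, int xbit)
{
	u32 ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(xbit);	/* 0 = read */
	u64 val;

	if (idx < 256)
		ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
	else
		ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
	t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);

	/* DATA1 holds the upper DMACH bits, DATA0 the low 32 bits */
	val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
	return (DMACH_G(val) << 32) |
	       t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
}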
@@ -1222,7 +1309,7 @@ static int sensors_show(struct seq_file *seq, void *v)
param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
param, val);
if (ret < 0 || val[0] == 0)
@@ -1416,6 +1503,9 @@ static int rss_config_show(struct seq_file *seq, void *v)
seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf));
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf));
+ else
+ seq_printf(seq, " VfWrAddr: %3d\n",
+ T6_VFWRADDR_G(rssconf));
seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]);
seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F));
seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F));
@@ -1634,14 +1724,14 @@ static int rss_vf_config_open(struct inode *inode, struct file *file)
struct adapter *adapter = inode->i_private;
struct seq_tab *p;
struct rss_vf_conf *vfconf;
- int vf;
+ int vf, vfcount = adapter->params.arch.vfcount;
- p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
+ p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
if (!p)
return -ENOMEM;
vfconf = (struct rss_vf_conf *)p->data;
- for (vf = 0; vf < 128; vf++) {
+ for (vf = 0; vf < vfcount; vf++) {
t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh);
}
@@ -1959,6 +2049,61 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
size_mb << 20);
}
+static int blocked_fl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int len;
+ const struct adapter *adap = filp->private_data;
+ char *buf;
+ ssize_t size = (adap->sge.egr_sz + 3) / 4 +
+		       adap->sge.egr_sz / 32 + 2; /* ',' separators, '\n', '\0' */
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, size - 1, "%*pb\n",
+ adap->sge.egr_sz, adap->sge.blocked_fl);
+ len += sprintf(buf + len, "\n");
+ size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+ t4_free_mem(buf);
+ return size;
+}
+
+static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long *t;
+ struct adapter *adap = filp->private_data;
+
+ t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+	err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+	if (err) {
+		t4_free_mem(t);
+		return err;
+	}
+
+ bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+ t4_free_mem(t);
+ return count;
+}
+
+static const struct file_operations blocked_fl_fops = {
+ .owner = THIS_MODULE,
+ .open = blocked_fl_open,
+ .read = blocked_fl_read,
+ .write = blocked_fl_write,
+ .llseek = generic_file_llseek,
+};
+
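The sizing expression in blocked_fl_read() encodes how the "%*pb" format prints a bitmap: one hex digit per four bits, a ',' every 32 bits, then the newline and NUL. Spelled out as a standalone helper (illustrative only, not part of the patch):

/* buffer size needed to print an nbits-wide bitmap with "%*pb" */
static size_t bitmap_print_size(unsigned int nbits)
{
	return (nbits + 3) / 4		/* one hex digit per 4 bits */
	       + nbits / 32		/* a ',' every 32 bits      */
	       + 2;			/* trailing '\n' and '\0'   */
}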
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -1978,7 +2123,7 @@ void add_debugfs_files(struct adapter *adap,
int t4_setup_debugfs(struct adapter *adap)
{
int i;
- u32 size;
+ u32 size = 0;
struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
@@ -2022,6 +2167,7 @@ int t4_setup_debugfs(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
#endif
+ { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
@@ -2048,12 +2194,7 @@ int t4_setup_debugfs(struct adapter *adap)
size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
}
- if (is_t4(adap->params.chip)) {
- size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
- if (i & EXT_MEM_ENABLE_F)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_G(size));
- } else {
+ if (is_t5(adap->params.chip)) {
if (i & EXT_MEM0_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
add_debugfs_mem(adap, "mc0", MEM_MC0,
@@ -2064,6 +2205,11 @@ int t4_setup_debugfs(struct adapter *adap)
add_debugfs_mem(adap, "mc1", MEM_MC1,
EXT_MEM1_SIZE_G(size));
}
+ } else {
+		if (i & EXT_MEM_ENABLE_F) {
+			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+			add_debugfs_mem(adap, "mc", MEM_MC,
+					EXT_MEM_SIZE_G(size));
+		}
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 10d82b51d7ef..687acf71fa15 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -108,15 +108,82 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"VLANinsertions ",
"GROpackets ",
"GROmerged ",
- "WriteCoalSuccess ",
- "WriteCoalFail ",
+};
+
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+ "db_drop ",
+ "db_full ",
+ "db_empty ",
+ "tcp_ipv4_out_rsts ",
+ "tcp_ipv4_in_segs ",
+ "tcp_ipv4_out_segs ",
+ "tcp_ipv4_retrans_segs ",
+ "tcp_ipv6_out_rsts ",
+ "tcp_ipv6_in_segs ",
+ "tcp_ipv6_out_segs ",
+ "tcp_ipv6_retrans_segs ",
+ "usm_ddp_frames ",
+ "usm_ddp_octets ",
+ "usm_ddp_drops ",
+ "rdma_no_rqe_mod_defer ",
+ "rdma_no_rqe_pkt_defer ",
+ "tp_err_ofld_no_neigh ",
+ "tp_err_ofld_cong_defer ",
+ "write_coal_success ",
+ "write_coal_fail ",
+};
+
+static char channel_stats_strings[][ETH_GSTRING_LEN] = {
+ "--------Channel--------- ",
+ "tp_cpl_requests ",
+ "tp_cpl_responses ",
+ "tp_mac_in_errs ",
+ "tp_hdr_in_errs ",
+ "tp_tcp_in_errs ",
+ "tp_tcp6_in_errs ",
+ "tp_tnl_cong_drops ",
+ "tp_tnl_tx_drops ",
+ "tp_ofld_vlan_drops ",
+ "tp_ofld_chan_drops ",
+ "fcoe_octets_ddp ",
+ "fcoe_frames_ddp ",
+ "fcoe_frames_drop ",
+};
+
+static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
+ "-------Loopback----------- ",
+ "octets_ok ",
+ "frames_ok ",
+ "bcast_frames ",
+ "mcast_frames ",
+ "ucast_frames ",
+ "error_frames ",
+ "frames_64 ",
+ "frames_65_to_127 ",
+ "frames_128_to_255 ",
+ "frames_256_to_511 ",
+ "frames_512_to_1023 ",
+ "frames_1024_to_1518 ",
+ "frames_1519_to_max ",
+ "frames_dropped ",
+ "bg0_frames_dropped ",
+ "bg1_frames_dropped ",
+ "bg2_frames_dropped ",
+ "bg3_frames_dropped ",
+ "bg0_frames_trunc ",
+ "bg1_frames_trunc ",
+ "bg2_frames_trunc ",
+ "bg3_frames_trunc ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(stats_strings);
+ return ARRAY_SIZE(stats_strings) +
+ ARRAY_SIZE(adapter_stats_strings) +
+ ARRAY_SIZE(channel_stats_strings) +
+ ARRAY_SIZE(loopback_stats_strings);
default:
return -EOPNOTSUPP;
}
@@ -168,8 +235,18 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS) {
memcpy(data, stats_strings, sizeof(stats_strings));
+ data += sizeof(stats_strings);
+ memcpy(data, adapter_stats_strings,
+ sizeof(adapter_stats_strings));
+ data += sizeof(adapter_stats_strings);
+ memcpy(data, channel_stats_strings,
+ sizeof(channel_stats_strings));
+ data += sizeof(channel_stats_strings);
+ memcpy(data, loopback_stats_strings,
+ sizeof(loopback_stats_strings));
+ }
}
/* port stats maintained per queue of the port. They should be in the same
@@ -185,6 +262,45 @@ struct queue_port_stats {
u64 gro_merged;
};
+struct adapter_stats {
+ u64 db_drop;
+ u64 db_full;
+ u64 db_empty;
+ u64 tcp_v4_out_rsts;
+ u64 tcp_v4_in_segs;
+ u64 tcp_v4_out_segs;
+ u64 tcp_v4_retrans_segs;
+ u64 tcp_v6_out_rsts;
+ u64 tcp_v6_in_segs;
+ u64 tcp_v6_out_segs;
+ u64 tcp_v6_retrans_segs;
+ u64 frames;
+ u64 octets;
+ u64 drops;
+ u64 rqe_dfr_mod;
+ u64 rqe_dfr_pkt;
+ u64 ofld_no_neigh;
+ u64 ofld_cong_defer;
+ u64 wc_success;
+ u64 wc_fail;
+};
+
+struct channel_stats {
+ u64 cpl_req;
+ u64 cpl_rsp;
+ u64 mac_in_errs;
+ u64 hdr_in_errs;
+ u64 tcp_in_errs;
+ u64 tcp6_in_errs;
+ u64 tnl_cong_drops;
+ u64 tnl_tx_drops;
+ u64 ofld_vlan_drops;
+ u64 ofld_chan_drops;
+ u64 octets_ddp;
+ u64 frames_ddp;
+ u64 frames_drop;
+};
+
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
@@ -205,30 +321,121 @@ static void collect_sge_port_stats(const struct adapter *adap,
}
}
+static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
+{
+ struct tp_tcp_stats v4, v6;
+ struct tp_rdma_stats rdma_stats;
+ struct tp_err_stats err_stats;
+ struct tp_usm_stats usm_stats;
+ u64 val1, val2;
+
+ memset(s, 0, sizeof(*s));
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, &v4, &v6);
+ t4_tp_get_rdma_stats(adap, &rdma_stats);
+ t4_get_usm_stats(adap, &usm_stats);
+ t4_tp_get_err_stats(adap, &err_stats);
+ spin_unlock(&adap->stats_lock);
+
+ s->db_drop = adap->db_stats.db_drop;
+ s->db_full = adap->db_stats.db_full;
+ s->db_empty = adap->db_stats.db_empty;
+
+ s->tcp_v4_out_rsts = v4.tcp_out_rsts;
+ s->tcp_v4_in_segs = v4.tcp_in_segs;
+ s->tcp_v4_out_segs = v4.tcp_out_segs;
+ s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
+ s->tcp_v6_out_rsts = v6.tcp_out_rsts;
+ s->tcp_v6_in_segs = v6.tcp_in_segs;
+ s->tcp_v6_out_segs = v6.tcp_out_segs;
+ s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
+
+ if (is_offload(adap)) {
+ s->frames = usm_stats.frames;
+ s->octets = usm_stats.octets;
+ s->drops = usm_stats.drops;
+ s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
+ s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
+ }
+
+ s->ofld_no_neigh = err_stats.ofld_no_neigh;
+ s->ofld_cong_defer = err_stats.ofld_cong_defer;
+
+ if (!is_t4(adap->params.chip)) {
+ int v;
+
+ v = t4_read_reg(adap, SGE_STAT_CFG_A);
+ if (STATSOURCE_T5_G(v) == 7) {
+ val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
+ val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
+ s->wc_success = val1 - val2;
+ s->wc_fail = val2;
+ }
+ }
+}
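The tail of collect_adapter_stats() only derives the write-coalescing counters when SGE_STAT_CFG_A still selects statistic source 7, which init_one() now programs once at probe time (see the STATSOURCE_T5_V(7) write later in this patch). Assuming, as the removed WriteCoal* ethtool stats implied, that SGE_STAT_MATCH_A counts the doorbell writes that missed coalescing, the derivation reads:

/*
 * Worked reading of the block above (the meaning of SGE_STAT_MATCH_A is
 * an assumption carried over from the old WriteCoalSuccess/Fail stats):
 *
 *	wc_success = SGE_STAT_TOTAL_A - SGE_STAT_MATCH_A;
 *	wc_fail    = SGE_STAT_MATCH_A;
 */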
+
+static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
+ u8 i)
+{
+ struct tp_cpl_stats cpl_stats;
+ struct tp_err_stats err_stats;
+ struct tp_fcoe_stats fcoe_stats;
+
+ memset(s, 0, sizeof(*s));
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_cpl_stats(adap, &cpl_stats);
+ t4_tp_get_err_stats(adap, &err_stats);
+ t4_get_fcoe_stats(adap, i, &fcoe_stats);
+ spin_unlock(&adap->stats_lock);
+
+ s->cpl_req = cpl_stats.req[i];
+ s->cpl_rsp = cpl_stats.rsp[i];
+ s->mac_in_errs = err_stats.mac_in_errs[i];
+ s->hdr_in_errs = err_stats.hdr_in_errs[i];
+ s->tcp_in_errs = err_stats.tcp_in_errs[i];
+ s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
+ s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
+ s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
+ s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
+ s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
+ s->octets_ddp = fcoe_stats.octets_ddp;
+ s->frames_ddp = fcoe_stats.frames_ddp;
+ s->frames_drop = fcoe_stats.frames_drop;
+}
+
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- u32 val1, val2;
+ struct lb_port_stats s;
+ int i;
+ u64 *p0;
- t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+ t4_get_port_stats_offset(adapter, pi->tx_chan,
+ (struct port_stats *)data,
+ &pi->stats_base);
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
data += sizeof(struct queue_port_stats) / sizeof(u64);
- if (!is_t4(adapter->params.chip)) {
- t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
- val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
- val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
- *data = val1 - val2;
- data++;
- *data = val2;
- data++;
- } else {
- memset(data, 0, 2 * sizeof(u64));
- *data += 2;
- }
+ collect_adapter_stats(adapter, (struct adapter_stats *)data);
+ data += sizeof(struct adapter_stats) / sizeof(u64);
+
+ *data++ = (u64)pi->port_id;
+ collect_channel_stats(adapter, (struct channel_stats *)data,
+ pi->port_id);
+ data += sizeof(struct channel_stats) / sizeof(u64);
+
+ *data++ = (u64)pi->port_id;
+ memset(&s, 0, sizeof(s));
+ t4_get_lb_stats(adapter, pi->port_id, &s);
+
+ p0 = &s.octets;
+ for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
+ *data++ = (unsigned long long)*p0++;
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
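get_sset_count() now advertises four concatenated string tables, so the u64 stream that get_stats() emits must line up slot for slot; the two (u64)pi->port_id writes pair with the "--------Channel" and "-------Loopback" header strings. A hedged compile-time check that would pin the pairing down (not in the driver):

/* illustrative consistency check, not part of the patch */
static inline void check_ethtool_stats_layout(void)
{
	BUILD_BUG_ON(sizeof(struct adapter_stats) / sizeof(u64) !=
		     ARRAY_SIZE(adapter_stats_strings));
	/* +1: the port_id marker stands in for the "Channel" header */
	BUILD_BUG_ON(sizeof(struct channel_stats) / sizeof(u64) + 1 !=
		     ARRAY_SIZE(channel_stats_strings));
}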
@@ -250,7 +457,7 @@ static int restart_autoneg(struct net_device *dev)
return -EAGAIN;
if (p->link_cfg.autoneg != AUTONEG_ENABLE)
return -EINVAL;
- t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+ t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
return 0;
}
@@ -267,7 +474,7 @@ static int identify_port(struct net_device *dev,
else
return -EINVAL;
- return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+ return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
@@ -439,7 +646,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
lc->autoneg = cmd->autoneg;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
@@ -472,7 +679,7 @@ static int set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
lc->requested_fc |= PAUSE_TX;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
@@ -578,7 +785,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
c->rx_coalesce_usecs = qtimer_val(adap, rq);
- c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+ c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
adap->sge.counter_val[rq->pktcnt_idx] : 0;
c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
return 0;
@@ -617,7 +824,7 @@ static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
*/
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -626,7 +833,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -669,8 +876,8 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
aligned_offset = eeprom->offset & ~3;
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
- if (adapter->fn > 0) {
- u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+ if (adapter->pf > 0) {
+ u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
if (aligned_offset < start ||
aligned_offset + aligned_len > start + EEPROMPFSIZE)
@@ -740,37 +947,6 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
return ret;
}
-#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
-#define BCAST_CRC 0xa0ccc1a6
-
-static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- wol->supported = WAKE_BCAST | WAKE_MAGIC;
- wol->wolopts = netdev2adap(dev)->wol;
- memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- int err = 0;
- struct port_info *pi = netdev_priv(dev);
-
- if (wol->wolopts & ~WOL_SUPPORTED)
- return -EINVAL;
- t4_wol_magic_enable(pi->adapter, pi->tx_chan,
- (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
- if (wol->wolopts & WAKE_BCAST) {
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
- ~0ULL, 0, false);
- if (!err)
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
- ~6ULL, ~0ULL, BCAST_CRC, true);
- } else {
- t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
- }
- return err;
-}
-
static u32 get_rss_table_size(struct net_device *dev)
{
const struct port_info *pi = netdev_priv(dev);
@@ -900,8 +1076,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
- .get_wol = get_wol,
- .set_wol = set_wol,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 803d91beec6f..0e27f2266e6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -135,8 +135,14 @@ struct filter_entry {
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
+#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
+#define FW6_CFNAME "cxgb4/t6-config.txt"
+#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
+#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
+#define PHY_AQ1202_DEVICEID 0x4409
+#define PHY_BCM84834_DEVICEID 0x4486
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
@@ -318,8 +324,9 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
* level") we need to issue the Set Parameters Commannd
* without sleeping (timeout < 0).
*/
- err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
- &name, &value);
+ err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+ &name, &value,
+ -FW_CMD_MAX_TIMEOUT);
if (err)
dev_err(adap->pdev_dev,
@@ -382,7 +389,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
int uc_cnt = netdev_uc_count(dev);
int mc_cnt = netdev_mc_count(dev);
const struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
+ unsigned int mb = pi->adapter->pf;
/* first do the secondary unicast addresses */
netdev_for_each_uc_addr(ha, dev) {
@@ -439,7 +446,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
ret = set_addr_filters(dev, sleep_ok);
if (ret == 0)
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
(dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
@@ -456,7 +463,7 @@ static int link_start(struct net_device *dev)
{
int ret;
struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
+ unsigned int mb = pi->adapter->pf;
/*
* We do not set address filters and promiscuity here, the stack does
@@ -474,7 +481,7 @@ static int link_start(struct net_device *dev)
}
}
if (ret == 0)
- ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+ ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
&pi->link_cfg);
if (ret == 0) {
local_bh_disable();
@@ -856,23 +863,39 @@ static void free_msix_queue_irqs(struct adapter *adap)
*
* Sets up the portion of the HW RSS table for the port's VI to distribute
* packets to the Rx queues in @queues.
+ *	Should never be called before the SGE Ethernet Rx queues are set up.
*/
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
u16 *rss;
int i, err;
- const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
if (!rss)
return -ENOMEM;
/* map the queue indices to queue ids */
for (i = 0; i < pi->rss_size; i++, queues++)
- rss[i] = q[*queues].rspq.abs_id;
+ rss[i] = rxq[*queues].rspq.abs_id;
- err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
+ err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
pi->rss_size, rss, pi->rss_size);
+ /* If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ if (!err)
+ err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_UDPEN_F,
+ rss[0]);
kfree(rss);
return err;
}
@@ -885,11 +908,15 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
*/
static int setup_rss(struct adapter *adap)
{
- int i, err;
+ int i, j, err;
for_each_port(adap, i) {
const struct port_info *pi = adap2pinfo(adap, i);
+ /* Fill default values with equal distribution */
+ for (j = 0; j < pi->rss_size; j++)
+ pi->rss[j] = j % pi->nqsets;
+
err = cxgb4_write_rss(pi, pi->rss);
if (err)
return err;
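This default fill replaces the ethtool_rxfh_indir_default() loop removed from init_rss() later in the patch; for hypothetical sizes it yields a plain round-robin indirection table:

/*
 * Worked example (hypothetical sizes): rss_size = 8, nqsets = 3
 *
 *	j          : 0 1 2 3 4 5 6 7
 *	j % nqsets : 0 1 2 0 1 2 0 1
 *
 * i.e. the same equal spread the old per-port init produced.
 */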
@@ -977,7 +1004,7 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[i / per_chan],
msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler);
+ uldrx_handler, 0);
if (err)
return err;
memset(&q->stats, 0, sizeof(q->stats));
@@ -1007,7 +1034,7 @@ static int setup_sge_queues(struct adapter *adap)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
- NULL, NULL);
+ NULL, NULL, -1);
if (err)
return err;
msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1027,7 +1054,7 @@ static int setup_sge_queues(struct adapter *adap)
* new/deleted queues.
*/
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- msi_idx, NULL, fwevtq_handler);
+ msi_idx, NULL, fwevtq_handler, -1);
if (err) {
freeout: t4_free_sge_resources(adap);
return err;
@@ -1044,7 +1071,9 @@ freeout: t4_free_sge_resources(adap);
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
msi_idx, &q->fl,
- t4_ethrx_handler);
+ t4_ethrx_handler,
+ t4_get_mps_bg_map(adap,
+ pi->tx_chan));
if (err)
goto freeout;
q->rspq.idx = j;
@@ -1324,11 +1353,6 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return fallback(dev, skb) % dev->real_num_tx_queues;
}
-static inline int is_offload(const struct adapter *adap)
-{
- return adap->params.offload;
-}
-
static int closest_timer(const struct sge *s, int time)
{
int i, delta, match = 0, min_delta = INT_MAX;
@@ -1389,8 +1413,8 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
FW_PARAMS_PARAM_X_V(
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
- err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
- &new_idx);
+ err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &v, &new_idx);
if (err)
return err;
}
@@ -1398,7 +1422,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
}
us = us == 0 ? 6 : closest_timer(&adap->sge, us);
- q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+ q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
return 0;
}
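QINTR_CNT_EN_V(cnt > 0) packs the enable bit from a boolean instead of the old conditional OR. A worked instance, with illustrative values:

/*
 * Example (illustrative values): holdoff timer index 2 with packet
 * counting enabled packs as
 *
 *	q->intr_params = QINTR_TIMER_IDX_V(2) | QINTR_CNT_EN_V(1);
 *
 * and get_coalesce() in cxgb4_ethtool.c now tests the matching
 * QINTR_CNT_EN_F flag before reporting rx_max_coalesced_frames.
 */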
@@ -1411,7 +1435,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
- err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
-1, -1, -1,
!!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err))
@@ -1694,7 +1718,7 @@ static int tid_init(struct tid_info *t)
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
/* Reserve stid 0 for T4/T5 adapters */
if (!t->stid_base &&
- (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+ (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
__set_bit(0, t->stid_bmap);
return 0;
@@ -1983,11 +2007,8 @@ EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- int ret;
- ret = t4_fwaddrspace_write(adap, adap->mbox,
- 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
- return ret;
+ return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
@@ -2042,25 +2063,6 @@ out:
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
-void cxgb4_disable_db_coalescing(struct net_device *dev)
-{
- struct adapter *adap;
-
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
- NOCOALESCE_F);
-}
-EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
-
-void cxgb4_enable_db_coalescing(struct net_device *dev)
-{
- struct adapter *adap;
-
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
-}
-EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
-
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
struct adapter *adap;
@@ -2100,10 +2102,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
if (offset < mc0_end) {
memtype = MEM_MC0;
memaddr = offset - edc1_end;
- } else if (is_t4(adap->params.chip)) {
- /* T4 only has a single memory channel */
- goto err;
- } else {
+ } else if (is_t5(adap->params.chip)) {
size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
mc1_size = EXT_MEM1_SIZE_G(size) << 20;
mc1_end = mc0_end + mc1_size;
@@ -2114,6 +2113,9 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
/* offset beyond the end of any memory */
goto err;
}
+ } else {
+ /* T4/T6 only has a single memory channel */
+ goto err;
}
}
@@ -2148,7 +2150,7 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid)
{
- return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
+ return t4_bar2_sge_qregs(netdev2adap(dev),
qid,
(qtype == CXGB4_BAR2_QTYPE_EGRESS
? T4_BAR2_QTYPE_EGRESS
@@ -2278,9 +2280,13 @@ static void process_db_full(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+ else
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -2342,7 +2348,7 @@ static void process_db_drop(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- } else {
+ } else if (is_t5(adap->params.chip)) {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
u16 pidx_inc = dropped_db & 0x1fff;
@@ -2350,7 +2356,7 @@ static void process_db_drop(struct work_struct *work)
unsigned int bar2_qid;
int ret;
- ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
+ ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
&bar2_qoffset, &bar2_qid);
if (ret)
dev_err(adap->pdev_dev, "doorbell drop recovery: "
@@ -2363,7 +2369,8 @@ static void process_db_drop(struct work_struct *work)
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
}
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
@@ -2393,7 +2400,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
unsigned short i;
lli.pdev = adap->pdev;
- lli.pf = adap->fn;
+ lli.pf = adap->pf;
lli.l2t = adap->l2t;
lli.tids = &adap->tids;
lli.ports = adap->port;
@@ -2432,6 +2439,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.max_ordird_qp = adap->params.max_ordird_qp;
lli.max_ird_adapter = adap->params.max_ird_adapter;
lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
+ lli.nodeid = dev_to_node(adap->pdev_dev);
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -2729,7 +2737,7 @@ static int cxgb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
+ return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
@@ -2873,7 +2881,8 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
spin_unlock(&adapter->stats_lock);
return ns;
}
- t4_get_port_stats(adapter, p->tx_chan, &stats);
+ t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
+ &p->stats_base);
spin_unlock(&adapter->stats_lock);
ns->tx_bytes = stats.tx_octets;
@@ -2932,7 +2941,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
} else
return -EINVAL;
- mbox = pi->adapter->fn;
+ mbox = pi->adapter->pf;
if (cmd == SIOCGMIIREG)
ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
data->reg_num, &data->val_out);
@@ -2959,7 +2968,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
return -EINVAL;
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
-1, -1, -1, true);
if (!ret)
dev->mtu = new_mtu;
@@ -2975,7 +2984,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+ ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
pi->xact_addr_filt, addr->sa_data, true, true);
if (ret < 0)
return ret;
@@ -3034,86 +3043,11 @@ void t4_fatal_err(struct adapter *adap)
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
-/* Return the specified PCI-E Configuration Space register from our Physical
- * Function. We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we go for it
- * directly ourselves.
- */
-static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
-{
- struct fw_ldst_cmd ldst_cmd;
- u32 val;
- int ret;
-
- /* Construct and send the Firmware LDST Command to retrieve the
- * specified PCI-E Configuration Space register.
- */
- memset(&ldst_cmd, 0, sizeof(ldst_cmd));
- ldst_cmd.op_to_addrspace =
- htonl(FW_CMD_OP_V(FW_LDST_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_READ_F |
- FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
- ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
- ldst_cmd.u.pcie.ctrl_to_fn =
- (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
- ldst_cmd.u.pcie.r = reg;
- ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
- &ldst_cmd);
-
- /* If the LDST Command suucceeded, exctract the returned register
- * value. Otherwise read it directly ourself.
- */
- if (ret == 0)
- val = ntohl(ldst_cmd.u.pcie.data[0]);
- else
- t4_hw_pci_read_cfg4(adap, reg, &val);
-
- return val;
-}
-
static void setup_memwin(struct adapter *adap)
{
- u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
+ u32 nic_win_base = t4_get_util_window(adap);
- if (is_t4(adap->params.chip)) {
- u32 bar0;
-
- /* Truncation intentional: we only read the bottom 32-bits of
- * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
- * mechanism to read BAR0 instead of using
- * pci_resource_start() because we could be operating from
- * within a Virtual Machine which is trapping our accesses to
- * our Configuration Space and we need to set up the PCI-E
- * Memory Window decoders with the actual addresses which will
- * be coming across the PCI-E link.
- */
- bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
- bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
- adap->t4_bar0 = bar0;
-
- mem_win0_base = bar0 + MEMWIN0_BASE;
- mem_win1_base = bar0 + MEMWIN1_BASE;
- mem_win2_base = bar0 + MEMWIN2_BASE;
- mem_win2_aperture = MEMWIN2_APERTURE;
- } else {
- /* For T5, only relative offset inside the PCIe BAR is passed */
- mem_win0_base = MEMWIN0_BASE;
- mem_win1_base = MEMWIN1_BASE;
- mem_win2_base = MEMWIN2_BASE_T5;
- mem_win2_aperture = MEMWIN2_APERTURE_T5;
- }
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
- mem_win0_base | BIR_V(0) |
- WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
- mem_win1_base | BIR_V(0) |
- WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
- mem_win2_base | BIR_V(0) |
- WINDOW_V(ilog2(mem_win2_aperture) - 10));
- t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
+ t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
static void setup_memwin_rdma(struct adapter *adap)
@@ -3147,7 +3081,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F);
c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
+ ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
if (ret < 0)
return ret;
@@ -3163,18 +3097,18 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
}
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
+ ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
if (ret < 0)
return ret;
- ret = t4_config_glbl_rss(adap, adap->fn,
+ ret = t4_config_glbl_rss(adap, adap->pf,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
if (ret < 0)
return ret;
- ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+ ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
FW_CMD_CAP_PF);
if (ret < 0)
@@ -3218,7 +3152,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
}
/* get basic stuff going */
- return t4_early_init(adap, adap->fn);
+ return t4_early_init(adap, adap->pf);
}
/*
@@ -3274,6 +3208,142 @@ static int adap_init0_tweaks(struct adapter *adapter)
return 0;
}
+/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
+ * unto themselves and they contain their own firmware to perform their
+ * tasks ...
+ */
+static int phy_aq1202_version(const u8 *phy_fw_data,
+ size_t phy_fw_size)
+{
+ int offset;
+
+ /* At offset 0x8 you're looking for the primary image's
+ * starting offset which is 3 Bytes wide
+ *
+ * At offset 0xa of the primary image, you look for the offset
+ * of the DRAM segment which is 3 Bytes wide.
+ *
+ * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
+ * wide
+ */
+ #define be16(__p) (((__p)[0] << 8) | (__p)[1])
+ #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
+ #define le24(__p) (le16(__p) | ((__p)[2] << 16))
+
+ offset = le24(phy_fw_data + 0x8) << 12;
+ offset = le24(phy_fw_data + offset + 0xa);
+ return be16(phy_fw_data + offset + 0x27e);
+
+ #undef be16
+ #undef le16
+ #undef le24
+}
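phy_aq1202_version() trusts the firmware blob to be well formed. Since the file arrives from userspace via request_firmware(), a defensive variant would bound-check each hop of the walk; a sketch under that assumption (same offsets as the comment above, helper name hypothetical):

/* illustrative bounds-checked rewrite of the walk above */
static int phy_aq1202_version_checked(const u8 *fw, size_t size)
{
	size_t img, dram;

	if (size < 0x8 + 3)
		return -EINVAL;
	img = (size_t)(fw[0x8] | (fw[0x9] << 8) | (fw[0xa] << 16)) << 12;
	if (size < img + 0xa + 3)
		return -EINVAL;
	dram = fw[img + 0xa] | (fw[img + 0xb] << 8) | (fw[img + 0xc] << 16);
	if (size < dram + 0x27e + 2)
		return -EINVAL;
	return (fw[dram + 0x27e] << 8) | fw[dram + 0x27f];	/* BE16 */
}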
+
+static struct info_10gbt_phy_fw {
+ unsigned int phy_fw_id; /* PCI Device ID */
+ char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */
+ int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
+ int phy_flash; /* Has FLASH for PHY Firmware */
+} phy_info_array[] = {
+ {
+ PHY_AQ1202_DEVICEID,
+ PHY_AQ1202_FIRMWARE,
+ phy_aq1202_version,
+ 1,
+ },
+ {
+ PHY_BCM84834_DEVICEID,
+ PHY_BCM84834_FIRMWARE,
+ NULL,
+ 0,
+ },
+ { 0, NULL, NULL },
+};
+
+static struct info_10gbt_phy_fw *find_phy_info(int devid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
+ if (phy_info_array[i].phy_fw_id == devid)
+ return &phy_info_array[i];
+ }
+ return NULL;
+}
+
+/* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
+ * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
+ * we return a negative error number. If we transfer new firmware we return 1
+ * (from t4_load_phy_fw()). If we don't do anything we return 0.
+ */
+static int adap_init0_phy(struct adapter *adap)
+{
+ const struct firmware *phyf;
+ int ret;
+ struct info_10gbt_phy_fw *phy_info;
+
+ /* Use the device ID to determine which PHY file to flash.
+ */
+ phy_info = find_phy_info(adap->pdev->device);
+ if (!phy_info) {
+ dev_warn(adap->pdev_dev,
+ "No PHY Firmware file found for this PHY\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
+ * use that. The adapter firmware provides us with a memory buffer
+ * where we can load a PHY firmware file from the host if we want to
+ * override the PHY firmware File in flash.
+ */
+ ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
+ adap->pdev_dev);
+ if (ret < 0) {
+ /* For adapters without FLASH attached to PHY for their
+ * firmware, it's obviously a fatal error if we can't get the
+ * firmware to the adapter. For adapters with PHY firmware
+ * FLASH storage, it's worth a warning if we can't find the
+ * PHY Firmware but we'll neuter the error ...
+ */
+ dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
+ "/lib/firmware/%s, error %d\n",
+ phy_info->phy_fw_file, -ret);
+ if (phy_info->phy_flash) {
+ int cur_phy_fw_ver = 0;
+
+ t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+			dev_warn(adap->pdev_dev, "continuing with on-adapter "
+ "FLASH copy, version %#x\n", cur_phy_fw_ver);
+ ret = 0;
+ }
+
+ return ret;
+ }
+
+ /* Load PHY Firmware onto adapter.
+ */
+ ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
+ phy_info->phy_fw_version,
+ (u8 *)phyf->data, phyf->size);
+ if (ret < 0)
+ dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
+ -ret);
+ else if (ret > 0) {
+ int new_phy_fw_ver = 0;
+
+ if (phy_info->phy_fw_version)
+ new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
+ phyf->size);
+ dev_info(adap->pdev_dev, "Successfully transferred PHY "
+ "Firmware /lib/firmware/%s, version %#x\n",
+ phy_info->phy_fw_file, new_phy_fw_ver);
+ }
+
+ release_firmware(phyf);
+
+ return ret;
+}
+
/*
* Attempt to initialize the adapter via a Firmware Configuration File.
*/
@@ -3298,6 +3368,16 @@ static int adap_init0_config(struct adapter *adapter, int reset)
goto bye;
}
+ /* If this is a 10Gb/s-BT adapter make sure the chip-external
+ * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
+ * to be performed after any global adapter RESET above since some
+ * PHYs only have local RAM copies of the PHY firmware.
+ */
+ if (is_10gbt_device(adapter->pdev->device)) {
+ ret = adap_init0_phy(adapter);
+ if (ret < 0)
+ goto bye;
+ }
/*
* If we have a T4 configuration file under /lib/firmware/cxgb4/,
* then use that. Otherwise, use the configuration file stored
@@ -3310,6 +3390,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
case CHELSIO_T5:
fw_config_file = FW5_CFNAME;
break;
+ case CHELSIO_T6:
+ fw_config_file = FW6_CFNAME;
+ break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
adapter->pdev->device);
@@ -3335,7 +3418,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adapter, adapter->mbox,
- adapter->fn, 0, 1, params, val);
+ adapter->pf, 0, 1, params, val);
if (ret == 0) {
/*
* For t4_memory_rw() below addresses and
@@ -3506,7 +3589,24 @@ static struct fw_info fw_info_array[] = {
.intfver_iscsi = FW_INTFVER(T5, ISCSI),
.intfver_fcoe = FW_INTFVER(T5, FCOE),
},
+ }, {
+ .chip = CHELSIO_T6,
+ .fs_name = FW6_CFNAME,
+ .fw_mod_name = FW6_FNAME,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T6,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+ .intfver_nic = FW_INTFVER(T6, NIC),
+ .intfver_vnic = FW_INTFVER(T6, VNIC),
+ .intfver_ofld = FW_INTFVER(T6, OFLD),
+ .intfver_ri = FW_INTFVER(T6, RI),
+ .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
+ .intfver_iscsi = FW_INTFVER(T6, ISCSI),
+ .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
+ .intfver_fcoe = FW_INTFVER(T6, FCOE),
+ },
}
+
};
static struct fw_info *find_fw_info(int chip)
@@ -3612,7 +3712,7 @@ static int adap_init0(struct adapter *adap)
* the firmware. On the other hand, we need these fairly early on
* so we do this right after getting ahold of the firmware.
*/
- ret = get_vpd_params(adap, &adap->params.vpd);
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
if (ret < 0)
goto bye;
@@ -3624,7 +3724,7 @@ static int adap_init0(struct adapter *adap)
v =
FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;
@@ -3647,7 +3747,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
params, val);
/* If the firmware doesn't support Configuration Files,
@@ -3706,7 +3806,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
params[5] = FW_PARAM_PFVF(IQFLINT_START);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_start = val[0];
@@ -3724,7 +3824,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = FW_PARAM_PFVF(EQ_END);
params[1] = FW_PARAM_PFVF(IQFLINT_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
@@ -3745,7 +3845,7 @@ static int adap_init0(struct adapter *adap)
}
 	/* Allocate the memory for the various egress queue bitmaps
-	 * ie starving_fl and txq_maperr.
+	 * i.e. starving_fl, txq_maperr and blocked_fl.
*/
adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
sizeof(long), GFP_KERNEL);
@@ -3761,9 +3861,18 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+#ifdef CONFIG_DEBUG_FS
+ adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.blocked_fl) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+#endif
+
params[0] = FW_PARAM_PFVF(CLIP_START);
params[1] = FW_PARAM_PFVF(CLIP_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->clipt_start = val[0];
@@ -3772,7 +3881,7 @@ static int adap_init0(struct adapter *adap)
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
/* If Active filter size is set we enable establishing
* offload connection through firmware work request
*/
@@ -3789,7 +3898,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
val[0] = 1;
- (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
+ (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
/*
* Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
@@ -3801,7 +3910,7 @@ static int adap_init0(struct adapter *adap)
adap->params.ulptx_memwrite_dsgl = false;
} else {
params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1, params, val);
adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
}
@@ -3827,7 +3936,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(TDDP_START);
params[4] = FW_PARAM_PFVF(TDDP_END);
params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
params, val);
if (ret < 0)
goto bye;
@@ -3865,7 +3974,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(RQ_END);
params[4] = FW_PARAM_PFVF(PBL_START);
params[5] = FW_PARAM_PFVF(PBL_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
params, val);
if (ret < 0)
goto bye;
@@ -3882,7 +3991,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(CQ_END);
params[4] = FW_PARAM_PFVF(OCQ_START);
params[5] = FW_PARAM_PFVF(OCQ_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
val);
if (ret < 0)
goto bye;
@@ -3895,7 +4004,7 @@ static int adap_init0(struct adapter *adap)
params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
val);
if (ret < 0) {
adap->params.max_ordird_qp = 8;
@@ -3913,7 +4022,7 @@ static int adap_init0(struct adapter *adap)
if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
params[1] = FW_PARAM_PFVF(ISCSI_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
params, val);
if (ret < 0)
goto bye;
@@ -3959,8 +4068,8 @@ static int adap_init0(struct adapter *adap)
adap->params.b_wnd);
}
t4_init_sge_params(adap);
- t4_init_tp_params(adap);
adap->flags |= FW_OK;
+ t4_init_tp_params(adap);
return 0;
/*
@@ -3973,6 +4082,9 @@ bye:
kfree(adap->sge.ingr_map);
kfree(adap->sge.starving_fl);
kfree(adap->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+ kfree(adap->sge.blocked_fl);
+#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
return ret;
@@ -4040,7 +4152,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
- if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
+ if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
adap->flags |= FW_OK;
if (adap_init1(adap, &c))
@@ -4049,7 +4161,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
for_each_port(adap, i) {
struct port_info *p = adap2pinfo(adap, i);
- ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+ ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
NULL, NULL);
if (ret < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4340,7 +4452,12 @@ static int enable_msix(struct adapter *adap)
static int init_rss(struct adapter *adap)
{
- unsigned int i, j;
+ unsigned int i;
+ int err;
+
+ err = t4_init_rss_mode(adap, adap->mbox);
+ if (err)
+ return err;
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -4348,8 +4465,6 @@ static int init_rss(struct adapter *adap)
pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
if (!pi->rss)
return -ENOMEM;
- for (j = 0; j < pi->rss_size; j++)
- pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
}
return 0;
}
@@ -4413,15 +4528,23 @@ static void free_some_resources(struct adapter *adapter)
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
kfree(adapter->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+ kfree(adapter->sge.blocked_fl);
+#endif
disable_msi(adapter);
for_each_port(adapter, i)
if (adapter->port[i]) {
+ struct port_info *pi = adap2pinfo(adapter, i);
+
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
kfree(adap2pinfo(adapter, i)->rss);
free_netdev(adapter->port[i]);
}
if (adapter->flags & FW_OK)
- t4_fw_bye(adapter, adapter->fn);
+ t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
@@ -4512,7 +4635,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->mbox = func;
- adapter->fn = func;
+ adapter->pf = func;
adapter->msg_enable = dflt_msg_enable;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -4532,7 +4655,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!is_t4(adapter->params.chip)) {
s_qpp = (QUEUESPERPAGEPF0_S +
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
- adapter->fn);
+ adapter->pf);
qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
num_seg = PAGE_SIZE / SEGMENT_SIZE;
@@ -4555,10 +4678,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_free_adapter;
}
+ t4_write_reg(adapter, SGE_STAT_CFG_A,
+ STATSOURCE_T5_V(7) | STATMODE_V(0));
}
setup_memwin(adapter);
err = adap_init0(adapter);
+#ifdef CONFIG_DEBUG_FS
+ bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
+#endif
setup_memwin_rdma(adapter);
if (err)
goto out_unmap_bar;
@@ -4607,10 +4735,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_port_init(adapter, func, func, 0);
if (err)
goto out_free_dev;
+ } else if (adapter->params.nports == 1) {
+ /* If we don't have a connection to the firmware -- possibly
+ * because of an error -- grab the raw VPD parameters so we
+ * can set the proper MAC Address on the debug network
+ * interface that we've created.
+ */
+ u8 hw_addr[ETH_ALEN];
+ u8 *na = adapter->params.vpd.na;
+
+ err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
+ if (!err) {
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+ t4_set_hw_addr(adapter, 0, hw_addr);
+ }
}
- /*
- * Configure queues and allocate tables now, they can be needed as
+ /* Configure queues and allocate tables now, they can be needed as
* soon as the first register_netdev completes.
*/
cfg_queues(adapter);
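The raw-VPD fallback above rebuilds the MAC from the VPD "na" field, which stores the address as twelve ASCII hex characters; each byte comes from one character pair. With a hypothetical value the conversion reads:

/*
 * Worked example (hypothetical VPD contents): na = "0007430abcde"
 *
 *	hw_addr[0] = hex2val('0') * 16 + hex2val('0') = 0x00
 *	hw_addr[1] = hex2val('0') * 16 + hex2val('7') = 0x07
 *	...
 *	hw_addr[5] = hex2val('d') * 16 + hex2val('e') = 0xde
 *
 * giving 00:07:43:0a:bc:de (00:07:43 being a Chelsio OUI).
 */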
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 78ab4d406ce2..14e8110b5dbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -264,6 +264,7 @@ struct cxgb4_lld_info {
unsigned int max_ordird_qp; /* Max ORD/IRD depth per RDMA QP */
unsigned int max_ird_adapter; /* Max IRD memory per adapter */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ int nodeid; /* device numa node id */
};
struct cxgb4_uld_info {
@@ -297,8 +298,6 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
-void cxgb4_disable_db_coalescing(struct net_device *dev);
-void cxgb4_enable_db_coalescing(struct net_device *dev);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 0d2eddab04ef..6b7c37fd0252 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -100,16 +100,6 @@
*/
#define TX_QCHECK_PERIOD (HZ / 2)
-/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
- * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA
- * State Machines in the same state for this amount of time (in HZ) then we'll
- * issue a warning about a potential hang. We'll repeat the warning as the
- * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
- * the situation clears. If the situation clears, we'll note that as well.
- */
-#define SGE_IDMA_WARN_THRESH (1 * HZ)
-#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
-
/*
* Max number of Tx descriptors to be reclaimed by the Tx timer.
*/
@@ -532,14 +522,17 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
- u32 val;
if (q->pend_cred >= 8) {
+ u32 val = adap->params.arch.sge_fl_db;
+
if (is_t4(adap->params.chip))
- val = PIDX_V(q->pend_cred / 8);
+ val |= PIDX_V(q->pend_cred / 8);
else
- val = PIDX_T5_V(q->pend_cred / 8) |
- DBTYPE_F;
- val |= DBPRIO_F;
+ val |= PIDX_T5_V(q->pend_cred / 8);
+
+ /* Make sure all memory writes to the Free List queue are
+ * committed before we tell the hardware about them.
+ */
wmb();
/* If we don't have access to the new User Doorbell (T5+), use
@@ -594,6 +587,11 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
int node;
+#ifdef CONFIG_DEBUG_FS
+ if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
+ goto out;
+#endif
+
gfp |= __GFP_NOWARN;
node = dev_to_node(adap->pdev_dev);
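This test_bit() hook is what the blocked_fl debugfs node added earlier actually drives: a set bit makes refill_fl() bail out before posting buffers, starving that free list on purpose. A usage sketch -- the debugfs path and queue index are illustrative:

/*
 * Usage sketch (path and index hypothetical): block the free list whose
 * egress context sits 3 entries above sge.egr_start by writing a bitmap
 * with bit 3 set, then unblock with an all-zero bitmap:
 *
 *	echo 8 > /sys/kernel/debug/cxgb4/<pci-dev>/blocked_fl
 *	echo 0 > /sys/kernel/debug/cxgb4/<pci-dev>/blocked_fl
 *
 * Useful for exercising the driver's free-list starvation recovery.
 */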
@@ -930,7 +928,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
- wmb(); /* write descriptors before telling HW */
+ /* Make sure that all writes to the TX Descriptors are committed
+ * before we tell the hardware about them.
+ */
+ wmb();
/* If we don't have access to the new User Doorbell (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism.
@@ -1032,7 +1033,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
@@ -1047,7 +1048,7 @@ nocsum: /*
* unknown protocol, disable HW csum
* and hope a bad packet is detected
*/
- return TXPKT_L4CSUM_DIS;
+ return TXPKT_L4CSUM_DIS_F;
}
} else {
/*
@@ -1063,15 +1064,21 @@ nocsum: /*
goto nocsum;
}
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
- else {
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+ int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+ if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+ hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ else
+ hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+ } else {
int start = skb_transport_offset(skb);
- return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_CSUM_START_V(start) |
+ TXPKT_CSUM_LOC_V(start + skb->csum_offset);
}
}
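The T5/T6 split introduced in hwcsum() above exists because T6 relocated the Ethernet-header-length field within the TX packet control word, so the encoder has to be chosen by chip revision. A sketch of that select-by-revision idea; the shift values below are assumptions, the real encodings live behind the driver's TXPKT_ETHHDR_LEN_V and T6_TXPKT_ETHHDR_LEN_V macros:

	#include <stdint.h>

	enum chip_rev { CHIP_T4 = 4, CHIP_T5 = 5, CHIP_T6 = 6 };

	#define ETHHDR_LEN_SHIFT_T5	34	/* hypothetical bit position */
	#define ETHHDR_LEN_SHIFT_T6	32	/* hypothetical bit position */

	/* Pick the field encoding that matches the running chip. */
	static uint64_t encode_ethhdr_len(enum chip_rev rev, unsigned int len)
	{
		if (rev <= CHIP_T5)
			return (uint64_t)len << ETHHDR_LEN_SHIFT_T5;
		return (uint64_t)len << ETHHDR_LEN_SHIFT_T6;
	}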
@@ -1112,11 +1119,11 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
return -ENOTSUPP;
/* FC CRC offload */
- *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
- TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
- TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
- TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
- TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
+ *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
+ TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
+ TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
+ TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
+ TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1130,7 +1137,6 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int len;
u32 wr_mid;
u64 cntrl, *end;
int qidx, credits;
@@ -1143,6 +1149,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
bool immediate = false;
+ int len, max_pkt_len;
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1156,13 +1163,20 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+	/* Discard the packet if the length is greater than the MTU */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb_get_queue_mapping(skb);
q = &adap->sge.ethtxq[qidx + pi->first_qset];
reclaim_completed_tx(adap, &q->q, true);
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
@@ -1213,23 +1227,29 @@ out_free: dev_kfree_skb_any(skb);
len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE | LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len / 4) |
- LSO_IPHDR_LEN(l3hdr_len / 4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->c.ipid_ofst = htons(0);
lso->c.mss = htons(ssi->gso_size);
lso->c.seqno_offset = htonl(0);
if (is_t4(adap->params.chip))
lso->c.len = htonl(skb->len);
else
- lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
+ lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
cpl = (void *)(lso + 1);
- cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
@@ -1238,23 +1258,25 @@ out_free: dev_kfree_skb_any(skb);
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ cntrl = hwcsum(adap->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
q->tx_cso++;
}
}
if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
if (skb->protocol == htons(ETH_P_FCOE))
- cntrl |= TXPKT_VLAN(
+ cntrl |= TXPKT_VLAN_V(
((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
}
- cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf));
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1964,7 +1986,7 @@ static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
static inline bool is_new_response(const struct rsp_ctrl *r,
const struct sge_rspq *q)
{
- return RSPD_GEN(r->type_gen) == q->gen;
+ return (r->type_gen >> RSPD_GEN_S) == q->gen;
}
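is_new_response() above is the classic generation-bit ownership test: the hardware writes a generation bit into each response descriptor, and the driver flips its expected value every time it wraps the ring, so a stale entry left over from the previous pass can never be mistaken for a new one. A self-contained model of the scheme, treating the generation bit as bit 7 of type_gen (an assumption standing in for RSPD_GEN_S):

	#include <stdbool.h>
	#include <stdint.h>

	#define GEN_SHIFT 7	/* assumed position of the generation bit */

	struct rspq_model {
		unsigned int cidx;	/* consumer index */
		unsigned int size;	/* ring entries */
		unsigned int gen;	/* expected generation: 0 or 1 */
	};

	static bool entry_is_new(const struct rspq_model *q, uint8_t type_gen)
	{
		return (type_gen >> GEN_SHIFT) == q->gen;
	}

	/* On wrap, flip the expected generation so old entries test stale. */
	static void consume_one(struct rspq_model *q)
	{
		if (++q->cidx == q->size) {
			q->cidx = 0;
			q->gen ^= 1;
		}
	}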
/**
@@ -2011,19 +2033,19 @@ static int process_responses(struct sge_rspq *q, int budget)
break;
dma_rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ rsp_type = RSPD_TYPE_G(rc->type_gen);
+ if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
struct page_frag *fp;
struct pkt_gl si;
const struct rx_sw_desc *rsd;
u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
- if (len & RSPD_NEWBUF) {
+ if (len & RSPD_NEWBUF_F) {
if (likely(q->offset > 0)) {
free_rx_bufs(q->adap, &rxq->fl, 1);
q->offset = 0;
}
- len = RSPD_LEN(len);
+ len = RSPD_LEN_G(len);
}
si.tot_len = len;
@@ -2058,7 +2080,7 @@ static int process_responses(struct sge_rspq *q, int budget)
q->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&si, &rxq->fl, frags);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
ret = q->handler(q, q->cur_desc, NULL);
} else {
ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
@@ -2066,7 +2088,7 @@ static int process_responses(struct sge_rspq *q, int budget)
if (unlikely(ret)) {
/* couldn't process descriptor, back off for recovery */
- q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
break;
}
@@ -2090,7 +2112,7 @@ int cxgb_busy_poll(struct napi_struct *napi)
return LL_FLUSH_BUSY;
work_done = process_responses(q, 4);
- params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+ params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
q->next_intr_params = params;
val = CIDXINC_V(work_done) | SEINTARM_V(params);
@@ -2137,7 +2159,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
int timer_index;
napi_complete(napi);
- timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
+ timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
if (q->adaptive_rx) {
if (work_done > max(timer_pkt_quota[timer_index],
@@ -2147,15 +2169,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
timer_index = timer_index - 1;
timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
- q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
- V_QINTR_CNT_EN;
+ q->next_intr_params =
+ QINTR_TIMER_IDX_V(timer_index) |
+ QINTR_CNT_EN_V(0);
params = q->next_intr_params;
} else {
params = q->next_intr_params;
q->next_intr_params = q->intr_params;
}
} else
- params = QINTR_TIMER_IDX(7);
+ params = QINTR_TIMER_IDX_V(7);
val = CIDXINC_V(work_done) | SEINTARM_V(params);
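The adaptive branch above is a small feedback loop on the interrupt holdoff timer: a heavy poll pushes the timer index up (more coalescing), a light poll pulls it down (lower latency), clamped to the valid range. A standalone sketch under assumed quota values (the driver's real table is timer_pkt_quota[], and the exact down-step condition is cut off in this hunk, so the one below is an assumption):

	static const int pkt_quota[] = { 1, 1, 2, 3, 4, 5 };	/* assumed */
	#define NTIMERS ((int)(sizeof(pkt_quota) / sizeof(pkt_quota[0])))

	static int clamp_int(int v, int lo, int hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	/* One adaptation step, driven by how much work the last poll did. */
	static int adapt_timer_idx(int idx, int work_done)
	{
		if (work_done > pkt_quota[idx])
			idx++;			/* busy: coalesce harder */
		else if (work_done < pkt_quota[idx])
			idx--;			/* idle: react faster */
		return clamp_int(idx, 0, NTIMERS - 1);
	}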
@@ -2203,7 +2226,7 @@ static unsigned int process_intrq(struct adapter *adap)
break;
dma_rmb();
- if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+ if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
unsigned int qid = ntohl(rc->pldbuflen_qid);
qid -= adap->sge.ingr_start;
@@ -2279,7 +2302,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
static void sge_rx_timer_cb(unsigned long data)
{
unsigned long m;
- unsigned int i, idma_same_state_cnt[2];
+ unsigned int i;
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
@@ -2300,67 +2323,16 @@ static void sge_rx_timer_cb(unsigned long data)
set_bit(id, s->starving_fl);
}
}
+ /* The remainder of the SGE RX Timer Callback routine is dedicated to
+ * global Master PF activities like checking for chip ingress stalls,
+ * etc.
+ */
+ if (!(adap->flags & MASTER_PF))
+ goto done;
- t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
- idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
- idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
-
- for (i = 0; i < 2; i++) {
- u32 debug0, debug11;
-
- /* If the Ingress DMA Same State Counter ("timer") is less
- * than 1s, then we can reset our synthesized Stall Timer and
- * continue. If we have previously emitted warnings about a
- * potential stalled Ingress Queue, issue a note indicating
- * that the Ingress Queue has resumed forward progress.
- */
- if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
- if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
- CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
- i, s->idma_qid[i],
- s->idma_stalled[i]/HZ);
- s->idma_stalled[i] = 0;
- continue;
- }
-
- /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
- * domain. The first time we get here it'll be because we
- * passed the 1s Threshold; each additional time it'll be
- * because the RX Timer Callback is being fired on its regular
- * schedule.
- *
- * If the stall is below our Potential Hung Ingress Queue
- * Warning Threshold, continue.
- */
- if (s->idma_stalled[i] == 0)
- s->idma_stalled[i] = HZ;
- else
- s->idma_stalled[i] += RX_QCHECK_PERIOD;
-
- if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
- continue;
-
- /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
- if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
- continue;
-
- /* Read and save the SGE IDMA State and Queue ID information.
- * We do this every time in case it changes across time ...
- */
- t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
- debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
- s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
-
- t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
- debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
- s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
-
- CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
- i, s->idma_qid[i], s->idma_state[i],
- s->idma_stalled[i]/HZ, debug0, debug11);
- t4_sge_decode_idma_state(adap, s->idma_state[i]);
- }
+ t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
+done:
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
@@ -2429,7 +2401,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
u64 bar2_qoffset;
int ret;
- ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype,
&bar2_qoffset, pbar2_qid);
if (ret)
return NULL;
@@ -2437,9 +2409,12 @@ static void __iomem *bar2_address(struct adapter *adapter,
return adapter->bar2 + bar2_qoffset;
}
+/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
+ * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
+ */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd)
+ struct sge_fl *fl, rspq_handler_t hnd, int cong)
{
int ret, flsz = 0;
struct fw_iq_cmd c;
@@ -2457,12 +2432,13 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
+ FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
FW_LEN16(c));
c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
- FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
+ FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
+ FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
@@ -2471,8 +2447,21 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
+ if (cong >= 0)
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
if (fl) {
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /* Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+ * descriptor ring. The free list size needs to be a multiple
+ * of the Egress Queue Unit and at least 2 Egress Units larger
+		 * than the SGE's Egress Congestion Threshold
+ * (fl_starve_thres - 1).
+ */
+ if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+ fl->size = s->fl_starve_thres - 1 + 2 * 8;
fl->size = roundup(fl->size, 8);
fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
sizeof(struct rx_sw_desc), &fl->addr,
@@ -2481,17 +2470,25 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto fl_nomem;
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
- FW_IQ_CMD_FL0FETCHRO_F |
- FW_IQ_CMD_FL0DATARO_F |
- FW_IQ_CMD_FL0PADEN_F);
- c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
- FW_IQ_CMD_FL0FBMAX_V(3));
+ c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0FETCHRO_F |
+ FW_IQ_CMD_FL0DATARO_F |
+ FW_IQ_CMD_FL0PADEN_F);
+ if (cong >= 0)
+ c.iqns_to_fl0congen |=
+ htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
+ FW_IQ_CMD_FL0CONGCIF_F |
+ FW_IQ_CMD_FL0CONGEN_F);
+ c.fl0dcaen_to_fl0cidxfthresh =
+ htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret)
goto err;
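The sizing rule in the hunk above keeps the free list from ever shrinking to the SGE's egress congestion threshold: it must hold at least fl_starve_thres - 1 entries plus two egress-queue units of 8 descriptors, and the result is rounded up to a multiple of 8. A quick runnable check with an assumed threshold:

	#include <stdio.h>

	int main(void)
	{
		unsigned int fl_starve_thres = 65;	/* assumed value */
		unsigned int size = 40;		/* caller asked for too few */
		unsigned int min_size = fl_starve_thres - 1 + 2 * 8; /* 80 */

		if (size < min_size)
			size = min_size;
		size = (size + 7) & ~7u;	/* roundup(size, 8) */
		printf("free list sized to %u entries\n", size);  /* 80 */
		return 0;
	}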
@@ -2532,6 +2529,41 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
&fl->bar2_qid);
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
+
+ /* For T5 and later we attempt to set up the Congestion Manager values
+ * of the new RX Ethernet Queue. This should really be handled by
+ * firmware because it's more complex than any host driver wants to
+ * get involved with and it's different per chip and this is almost
+ * certainly wrong. Firmware would be wrong as well, but it would be
+ * a lot easier to fix in one place ... For now we do something very
+ * simple (and hopefully less wrong).
+ */
+ if (!is_t4(adap->params.chip) && cong >= 0) {
+ u32 param, val;
+ int i;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+ FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
+ if (cong == 0) {
+ val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
+ } else {
+ val =
+ CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
+ for (i = 0; i < 4; i++) {
+ if (cong & (1 << i))
+ val |=
+ CONMCTXT_CNGCHMAP_V(1 << (i << 2));
+ }
+ }
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret)
+ dev_warn(adap->pdev_dev, "Failed to set Congestion"
+ " Manager Context for Ingress Queue %d: %d\n",
+ iq->cntxt_id, -ret);
+ }
+
return 0;
fl_nomem:
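The channel-map loop in the congestion-manager block above fans each set bit of the 4-bit @cong bitmap out to bit position i*4 of the CONM context's CNGCHMAP field. A runnable model of just that expansion, with field placement taken from the shifts in the code above:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t expand_cong_map(unsigned int cong)
	{
		uint32_t map = 0;
		int i;

		for (i = 0; i < 4; i++)
			if (cong & (1 << i))
				map |= 1u << (i << 2);	/* channel i -> bit 4*i */
		return map;
	}

	int main(void)
	{
		/* channels 0 and 2 set -> bits 0 and 8 -> 0x101 */
		printf("%#x\n", expand_cong_map(0x5));
		return 0;
	}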
@@ -2589,23 +2621,24 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_ETH_CMD_PFN_V(adap->fn) |
+ FW_EQ_ETH_CMD_PFN_V(adap->pf) |
FW_EQ_ETH_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
- c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
- FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_ETH_CMD_FETCHRO_V(1) |
- FW_EQ_ETH_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
- FW_EQ_ETH_CMD_FBMAX_V(3) |
- FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -2637,29 +2670,30 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
sizeof(struct tx_desc), 0, &txq->q.phys_addr,
- NULL, 0, NUMA_NO_NODE);
+ NULL, 0, dev_to_node(adap->pdev_dev));
if (!txq->q.desc)
return -ENOMEM;
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
+ FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
FW_EQ_CTRL_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
c.physeqid_pkd = htonl(0);
- c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
- FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_CTRL_CMD_FETCHRO_F |
- FW_EQ_CTRL_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
- FW_EQ_CTRL_CMD_FBMAX_V(3) |
- FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
@@ -2697,21 +2731,22 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
+ FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
FW_EQ_OFLD_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
- c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
- FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_OFLD_CMD_FETCHRO_F |
- FW_EQ_OFLD_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
- FW_EQ_OFLD_CMD_FBMAX_V(3) |
- FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -2750,7 +2785,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
- t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
@@ -2805,7 +2840,7 @@ void t4_free_sge_resources(struct adapter *adap)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
if (etq->q.desc) {
- t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
kfree(etq->q.sdesc);
@@ -2824,7 +2859,7 @@ void t4_free_sge_resources(struct adapter *adap)
if (q->q.desc) {
tasklet_kill(&q->qresume_tsk);
- t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
q->q.cntxt_id);
free_tx_desc(adap, &q->q, q->q.in_use, false);
kfree(q->q.sdesc);
@@ -2839,7 +2874,7 @@ void t4_free_sge_resources(struct adapter *adap)
if (cq->q.desc) {
tasklet_kill(&cq->qresume_tsk);
- t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
cq->q.cntxt_id);
__skb_queue_purge(&cq->sendq);
free_txq(adap, &cq->q);
@@ -3023,7 +3058,11 @@ int t4_sge_init(struct adapter *adap)
* Packing Boundary. T5 introduced the ability to specify these
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
- * specifications.
+ * specifications. (Note that it makes no real practical sense to
+ * have the Padding Boundary be larger than the Packing Boundary but you
+ * could set the chip up that way and, in fact, legacy T4 code would
+ * end up doing this because it would initialize the Padding Boundary and
+ * leave the Packing Boundary initialized to 0 (16 bytes).)
*/
ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
INGPADBOUNDARY_SHIFT_X);
@@ -3067,11 +3106,14 @@ int t4_sge_init(struct adapter *adap)
egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
s->fl_starve_thres = 2*egress_threshold + 1;
+ t4_idma_monitor_init(adap, &s->idma_monitor);
+
+ /* Set up timers used for recurring callbacks to process RX and TX
+ * administrative tasks.
+ */
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
- s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
- s->idma_stalled[0] = 0;
- s->idma_stalled[1] = 0;
+
spin_lock_init(&s->intrq_lock);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8578a742f2a..fdda0f8c5a19 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -150,7 +150,12 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
*/
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
- u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
+ u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ req |= ENABLE_F;
+ else
+ req |= T6_ENABLE_F;
if (is_t4(adap->params.chip))
req |= LOCALCFG_F;
@@ -214,8 +219,8 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr)
get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
dev_alert(adap->pdev_dev,
"FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
- asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
- ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+ asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+ be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
@@ -233,13 +238,14 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
}
/**
- * t4_wr_mbox_meat - send a command to FW through the given mailbox
+ * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
* @adap: the adapter
* @mbox: index of the mailbox to use
* @cmd: the command to write
* @size: command length in bytes
* @rpl: where to optionally store the reply
* @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
*
* Sends the given command to FW through the selected mailbox and waits
* for the FW to execute the command. If @rpl is not %NULL it is used to
@@ -254,8 +260,8 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
* command or FW executes it but signals an error. In the latter case
* the return value is the error code indicated by FW (negated).
*/
-int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
- void *rpl, bool sleep_ok)
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout)
{
static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
@@ -294,7 +300,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+ for (i = 0; i < timeout; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -332,114 +338,11 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
return -ETIMEDOUT;
}
-/**
- * t4_mc_read - read from MC through backdoor accesses
- * @adap: the adapter
- * @addr: address of first byte requested
- * @idx: which MC to access
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from MC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
-{
- int i;
- u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
- u32 mc_bist_status_rdata, mc_bist_data_pattern;
-
- if (is_t4(adap->params.chip)) {
- mc_bist_cmd = MC_BIST_CMD_A;
- mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
- mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
- mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
- mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
- } else {
- mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
- mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
- mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
- mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
- mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
- }
-
- if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
- return -EBUSY;
- t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
- t4_write_reg(adap, mc_bist_cmd_len, 64);
- t4_write_reg(adap, mc_bist_data_pattern, 0xc);
- t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
- BIST_CMD_GAP_V(1));
- i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
- if (i)
- return i;
-
-#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
- if (ecc)
- *ecc = t4_read_reg64(adap, MC_DATA(16));
-#undef MC_DATA
- return 0;
-}
-
-/**
- * t4_edc_read - read from EDC through backdoor accesses
- * @adap: the adapter
- * @idx: which EDC to access
- * @addr: address of first byte requested
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
{
- int i;
- u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
- u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
-
- if (is_t4(adap->params.chip)) {
- edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
- edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
- edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
- edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
- idx);
- edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
- idx);
- } else {
- edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
- edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
- edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
- edc_bist_cmd_data_pattern =
- EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
- edc_bist_status_rdata =
- EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
- }
-
- if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
- return -EBUSY;
- t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
- t4_write_reg(adap, edc_bist_cmd_len, 64);
- t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
- t4_write_reg(adap, edc_bist_cmd,
- BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
- i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
- if (i)
- return i;
-
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
- if (ecc)
- *ecc = t4_read_reg64(adap, EDC_DATA(16));
-#undef EDC_DATA
- return 0;
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+ FW_CMD_MAX_TIMEOUT);
}
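With the refactor above, t4_wr_mbox_meat() becomes a thin wrapper that passes FW_CMD_MAX_TIMEOUT to the new _timeout variant, which polls the mailbox using the bounded backoff table shown earlier ({1, 1, 3, 5, 10, 10, 20, 50, 100, 200} ms, last entry repeating). A sketch of that polling loop's shape, with done() and sleep_ms() as hypothetical stand-ins for the hardware poll and msleep():

	#include <stdbool.h>

	static const int delay_ms[] = { 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 };
	#define NDELAY ((int)(sizeof(delay_ms) / sizeof(delay_ms[0])))

	/* Returns 0 on completion, -1 on timeout (-ETIMEDOUT in the driver). */
	static int poll_with_backoff(int timeout_ms, bool (*done)(void *),
				     void *arg, void (*sleep_ms)(int))
	{
		int i, di = 0, ms = delay_ms[0];

		for (i = 0; i < timeout_ms; i += ms) {
			ms = delay_ms[di];
			if (di < NDELAY - 1)
				di++;	/* last element may repeat */
			sleep_ms(ms);
			if (done(arg))
				return 0;
		}
		return -1;
	}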
/**
@@ -483,9 +386,8 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
* MEM_EDC1 = 1
- * MEM_MC = 2 -- T4
- * MEM_MC0 = 2 -- For T5
- * MEM_MC1 = 3 -- For T5
+ * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
@@ -514,7 +416,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
if (is_t4(adap->params.chip))
mem_base -= adap->t4_bar0;
- win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
+ win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
/* Calculate our initial PCI-E Memory Window Position and Offset into
* that Window.
@@ -625,6 +527,102 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
return 0;
}
+/* Return the specified PCI-E Configuration Space register from our Physical
+ * Function. We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
+{
+ u32 val, ldst_addrspace;
+
+	/* Construct and send the Firmware LDST Command to
+ * retrieve the specified PCI-E Configuration Space register.
+ */
+ struct fw_ldst_cmd ldst_cmd;
+ int ret;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
+ ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ ldst_addrspace);
+ ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
+ ldst_cmd.u.pcie.ctrl_to_fn =
+ (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
+ ldst_cmd.u.pcie.r = reg;
+
+ /* If the LDST Command succeeds, return the result, otherwise
+ * fall through to reading it directly ourselves ...
+ */
+ ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+ &ldst_cmd);
+ if (ret == 0)
+ val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
+ else
+ /* Read the desired Configuration Space register via the PCI-E
+ * Backdoor mechanism.
+ */
+ t4_hw_pci_read_cfg4(adap, reg, &val);
+ return val;
+}
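t4_read_pcie_cfg4() above is an instance of the driver's general "prefer firmware, fall back to direct hardware access" pattern. A minimal shape of it, with fw_read() and hw_read() as hypothetical callbacks:

	#include <stdint.h>

	static uint32_t read_cfg4(int reg,
				  int (*fw_read)(int reg, uint32_t *val),
				  void (*hw_read)(int reg, uint32_t *val))
	{
		uint32_t val;

		if (fw_read(reg, &val) == 0)
			return val;	/* firmware owns the register: done */
		hw_read(reg, &val);	/* backdoor fallback */
		return val;
	}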
+
+/* Get the window based on base passed to it.
+ * Window aperture is currently unhandled, but there is no use case for it
+ * right now
+ */
+static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
+ u32 memwin_base)
+{
+ u32 ret;
+
+ if (is_t4(adap->params.chip)) {
+ u32 bar0;
+
+ /* Truncation intentional: we only read the bottom 32-bits of
+ * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
+ * mechanism to read BAR0 instead of using
+ * pci_resource_start() because we could be operating from
+ * within a Virtual Machine which is trapping our accesses to
+ * our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will
+ * be coming across the PCI-E link.
+ */
+ bar0 = t4_read_pcie_cfg4(adap, pci_base);
+ bar0 &= pci_mask;
+ adap->t4_bar0 = bar0;
+
+ ret = bar0 + memwin_base;
+ } else {
+ /* For T5, only relative offset inside the PCIe BAR is passed */
+ ret = memwin_base;
+ }
+ return ret;
+}
+
+/* Get the default utility window (win0) used by everyone */
+u32 t4_get_util_window(struct adapter *adap)
+{
+ return t4_get_window(adap, PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
+}
+
+/* Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
+{
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
+ memwin_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
+}
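The read-back after the register write in t4_setup_memwin() above is the standard PCIe posted-write flush: reading the same register forces the preceding write to reach the device before the new window position is relied upon. A tiny sketch of the idiom:

	#include <stdint.h>

	/* Posted-write flush: a PCIe memory write may still be in flight
	 * when the CPU moves on, so a read-back of the same register is
	 * used to guarantee it has landed. */
	static void program_window(volatile uint32_t *win_reg, uint32_t val)
	{
		*win_reg = val;		/* posted: may not have landed yet */
		(void)*win_reg;		/* read-back: now it has */
	}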
+
/**
 * t4_get_regs_len - return the size of the chip's register set
* @adapter: the adapter
@@ -640,6 +638,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
return T4_REGMAP_SIZE;
case CHELSIO_T5:
+ case CHELSIO_T6:
return T5_REGMAP_SIZE;
}
@@ -666,7 +665,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x11fc, 0x123c,
0x1300, 0x173c,
0x1800, 0x18fc,
- 0x3000, 0x30d8,
+ 0x3000, 0x305c,
+ 0x3068, 0x30d8,
0x30e0, 0x5924,
0x5960, 0x59d4,
0x5a00, 0x5af8,
@@ -729,7 +729,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x19238, 0x1924c,
0x193f8, 0x19474,
0x19490, 0x194f8,
- 0x19800, 0x19f30,
+ 0x19800, 0x19f4c,
0x1a000, 0x1a06c,
0x1a0b0, 0x1a120,
0x1a128, 0x1a138,
@@ -878,7 +878,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x27780, 0x2778c,
0x27800, 0x27c38,
0x27c80, 0x27d7c,
- 0x27e00, 0x27e04
+ 0x27e00, 0x27e04,
};
static const unsigned int t5_reg_ranges[] = {
@@ -888,7 +888,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1280, 0x173c,
0x1800, 0x18fc,
0x3000, 0x3028,
- 0x3060, 0x30d8,
+ 0x3068, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
@@ -900,7 +900,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x5940, 0x59dc,
0x59fc, 0x5a18,
0x5a60, 0x5a9c,
- 0x5b9c, 0x5bfc,
+ 0x5b94, 0x5bfc,
0x6000, 0x6040,
0x6058, 0x614c,
0x7700, 0x7798,
@@ -1014,27 +1014,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x30800, 0x30834,
0x308c0, 0x30908,
0x30910, 0x309ac,
- 0x30a00, 0x30a04,
- 0x30a0c, 0x30a2c,
+ 0x30a00, 0x30a2c,
0x30a44, 0x30a50,
0x30a74, 0x30c24,
+ 0x30d00, 0x30d00,
0x30d08, 0x30d14,
0x30d1c, 0x30d20,
0x30d3c, 0x30d50,
0x31200, 0x3120c,
0x31220, 0x31220,
0x31240, 0x31240,
- 0x31600, 0x31600,
- 0x31608, 0x3160c,
+ 0x31600, 0x3160c,
0x31a00, 0x31a1c,
- 0x31e04, 0x31e20,
+ 0x31e00, 0x31e20,
0x31e38, 0x31e3c,
0x31e80, 0x31e80,
0x31e88, 0x31ea8,
0x31eb0, 0x31eb4,
0x31ec8, 0x31ed4,
0x31fb8, 0x32004,
- 0x32208, 0x3223c,
+ 0x32200, 0x32200,
+ 0x32208, 0x32240,
+ 0x32248, 0x32280,
+ 0x32288, 0x322c0,
+ 0x322c8, 0x322fc,
0x32600, 0x32630,
0x32a00, 0x32abc,
0x32b00, 0x32b70,
@@ -1074,27 +1077,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x34800, 0x34834,
0x348c0, 0x34908,
0x34910, 0x349ac,
- 0x34a00, 0x34a04,
- 0x34a0c, 0x34a2c,
+ 0x34a00, 0x34a2c,
0x34a44, 0x34a50,
0x34a74, 0x34c24,
+ 0x34d00, 0x34d00,
0x34d08, 0x34d14,
0x34d1c, 0x34d20,
0x34d3c, 0x34d50,
0x35200, 0x3520c,
0x35220, 0x35220,
0x35240, 0x35240,
- 0x35600, 0x35600,
- 0x35608, 0x3560c,
+ 0x35600, 0x3560c,
0x35a00, 0x35a1c,
- 0x35e04, 0x35e20,
+ 0x35e00, 0x35e20,
0x35e38, 0x35e3c,
0x35e80, 0x35e80,
0x35e88, 0x35ea8,
0x35eb0, 0x35eb4,
0x35ec8, 0x35ed4,
0x35fb8, 0x36004,
- 0x36208, 0x3623c,
+ 0x36200, 0x36200,
+ 0x36208, 0x36240,
+ 0x36248, 0x36280,
+ 0x36288, 0x362c0,
+ 0x362c8, 0x362fc,
0x36600, 0x36630,
0x36a00, 0x36abc,
0x36b00, 0x36b70,
@@ -1134,27 +1140,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x38800, 0x38834,
0x388c0, 0x38908,
0x38910, 0x389ac,
- 0x38a00, 0x38a04,
- 0x38a0c, 0x38a2c,
+ 0x38a00, 0x38a2c,
0x38a44, 0x38a50,
0x38a74, 0x38c24,
+ 0x38d00, 0x38d00,
0x38d08, 0x38d14,
0x38d1c, 0x38d20,
0x38d3c, 0x38d50,
0x39200, 0x3920c,
0x39220, 0x39220,
0x39240, 0x39240,
- 0x39600, 0x39600,
- 0x39608, 0x3960c,
+ 0x39600, 0x3960c,
0x39a00, 0x39a1c,
- 0x39e04, 0x39e20,
+ 0x39e00, 0x39e20,
0x39e38, 0x39e3c,
0x39e80, 0x39e80,
0x39e88, 0x39ea8,
0x39eb0, 0x39eb4,
0x39ec8, 0x39ed4,
0x39fb8, 0x3a004,
- 0x3a208, 0x3a23c,
+ 0x3a200, 0x3a200,
+ 0x3a208, 0x3a240,
+ 0x3a248, 0x3a280,
+ 0x3a288, 0x3a2c0,
+ 0x3a2c8, 0x3a2fc,
0x3a600, 0x3a630,
0x3aa00, 0x3aabc,
0x3ab00, 0x3ab70,
@@ -1194,27 +1203,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3c800, 0x3c834,
0x3c8c0, 0x3c908,
0x3c910, 0x3c9ac,
- 0x3ca00, 0x3ca04,
- 0x3ca0c, 0x3ca2c,
+ 0x3ca00, 0x3ca2c,
0x3ca44, 0x3ca50,
0x3ca74, 0x3cc24,
+ 0x3cd00, 0x3cd00,
0x3cd08, 0x3cd14,
0x3cd1c, 0x3cd20,
0x3cd3c, 0x3cd50,
0x3d200, 0x3d20c,
0x3d220, 0x3d220,
0x3d240, 0x3d240,
- 0x3d600, 0x3d600,
- 0x3d608, 0x3d60c,
+ 0x3d600, 0x3d60c,
0x3da00, 0x3da1c,
- 0x3de04, 0x3de20,
+ 0x3de00, 0x3de20,
0x3de38, 0x3de3c,
0x3de80, 0x3de80,
0x3de88, 0x3dea8,
0x3deb0, 0x3deb4,
0x3dec8, 0x3ded4,
0x3dfb8, 0x3e004,
- 0x3e208, 0x3e23c,
+ 0x3e200, 0x3e200,
+ 0x3e208, 0x3e240,
+ 0x3e248, 0x3e280,
+ 0x3e288, 0x3e2c0,
+ 0x3e2c8, 0x3e2fc,
0x3e600, 0x3e630,
0x3ea00, 0x3eabc,
0x3eb00, 0x3eb70,
@@ -1247,7 +1259,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3fcf0, 0x3fcfc,
0x40000, 0x4000c,
0x40040, 0x40068,
- 0x40080, 0x40144,
+ 0x4007c, 0x40144,
0x40180, 0x4018c,
0x40200, 0x40298,
0x402ac, 0x4033c,
@@ -1275,7 +1287,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x47800, 0x47814,
0x48000, 0x4800c,
0x48040, 0x48068,
- 0x48080, 0x48144,
+ 0x4807c, 0x48144,
0x48180, 0x4818c,
0x48200, 0x48298,
0x482ac, 0x4833c,
@@ -1309,6 +1321,344 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x51300, 0x51308,
};
+ static const unsigned int t6_reg_ranges[] = {
+ 0x1008, 0x114c,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x1250,
+ 0x1280, 0x133c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x302c,
+ 0x3060, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x58bc,
+ 0x5940, 0x595c,
+ 0x5980, 0x598c,
+ 0x59b0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5ec0,
+ 0x5ec8, 0x5ec8,
+ 0x6000, 0x6040,
+ 0x6058, 0x6154,
+ 0x7700, 0x7798,
+ 0x77c0, 0x7880,
+ 0x78cc, 0x78fc,
+ 0x7b00, 0x7c54,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e84,
+ 0x8ea0, 0x8f88,
+ 0x8fb8, 0x911c,
+ 0x9400, 0x9470,
+ 0x9600, 0x971c,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xf008,
+ 0x11000, 0x11014,
+ 0x11048, 0x11110,
+ 0x11118, 0x1117c,
+ 0x11190, 0x11260,
+ 0x11300, 0x1130c,
+ 0x12000, 0x1205c,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x192b8,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c80,
+ 0x19c94, 0x19cbc,
+ 0x19ce4, 0x19d28,
+ 0x19d50, 0x19d78,
+ 0x19d94, 0x19dc8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fac,
+ 0x19fc4, 0x19fe4,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30070,
+ 0x30100, 0x3015c,
+ 0x30190, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x3088c,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309b8,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30c24,
+ 0x30d00, 0x30d3c,
+ 0x30d44, 0x30d7c,
+ 0x30de0, 0x30de0,
+ 0x30e00, 0x30ed4,
+ 0x30f00, 0x30fa4,
+ 0x30fc0, 0x30fc4,
+ 0x31000, 0x31004,
+ 0x31080, 0x310fc,
+ 0x31208, 0x31220,
+ 0x3123c, 0x31254,
+ 0x31300, 0x31300,
+ 0x31308, 0x3131c,
+ 0x31338, 0x3133c,
+ 0x31380, 0x31380,
+ 0x31388, 0x313a8,
+ 0x313b4, 0x313b4,
+ 0x31400, 0x31420,
+ 0x31438, 0x3143c,
+ 0x31480, 0x31480,
+ 0x314a8, 0x314a8,
+ 0x314b0, 0x314b4,
+ 0x314c8, 0x314d4,
+ 0x31a40, 0x31a4c,
+ 0x31af0, 0x31b20,
+ 0x31b38, 0x31b3c,
+ 0x31b80, 0x31b80,
+ 0x31ba8, 0x31ba8,
+ 0x31bb0, 0x31bb4,
+ 0x31bc8, 0x31bd4,
+ 0x32140, 0x3218c,
+ 0x321f0, 0x32200,
+ 0x32218, 0x32218,
+ 0x32400, 0x32400,
+ 0x32408, 0x3241c,
+ 0x32618, 0x32620,
+ 0x32664, 0x32664,
+ 0x326a8, 0x326a8,
+ 0x326ec, 0x326ec,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b78,
+ 0x32c00, 0x32c00,
+ 0x32c08, 0x32c3c,
+ 0x32e00, 0x32e2c,
+ 0x32f00, 0x32f2c,
+ 0x33000, 0x330ac,
+ 0x330c0, 0x331ac,
+ 0x331c0, 0x332c4,
+ 0x332e4, 0x333c4,
+ 0x333e4, 0x334ac,
+ 0x334c0, 0x335ac,
+ 0x335c0, 0x336c4,
+ 0x336e4, 0x337c4,
+ 0x337e4, 0x337fc,
+ 0x33814, 0x33814,
+ 0x33854, 0x33868,
+ 0x33880, 0x3388c,
+ 0x338c0, 0x338d0,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x339ac,
+ 0x339c0, 0x33ac4,
+ 0x33ae4, 0x33b10,
+ 0x33b24, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c24, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34070,
+ 0x34100, 0x3415c,
+ 0x34190, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x3488c,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349b8,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34c24,
+ 0x34d00, 0x34d3c,
+ 0x34d44, 0x34d7c,
+ 0x34de0, 0x34de0,
+ 0x34e00, 0x34ed4,
+ 0x34f00, 0x34fa4,
+ 0x34fc0, 0x34fc4,
+ 0x35000, 0x35004,
+ 0x35080, 0x350fc,
+ 0x35208, 0x35220,
+ 0x3523c, 0x35254,
+ 0x35300, 0x35300,
+ 0x35308, 0x3531c,
+ 0x35338, 0x3533c,
+ 0x35380, 0x35380,
+ 0x35388, 0x353a8,
+ 0x353b4, 0x353b4,
+ 0x35400, 0x35420,
+ 0x35438, 0x3543c,
+ 0x35480, 0x35480,
+ 0x354a8, 0x354a8,
+ 0x354b0, 0x354b4,
+ 0x354c8, 0x354d4,
+ 0x35a40, 0x35a4c,
+ 0x35af0, 0x35b20,
+ 0x35b38, 0x35b3c,
+ 0x35b80, 0x35b80,
+ 0x35ba8, 0x35ba8,
+ 0x35bb0, 0x35bb4,
+ 0x35bc8, 0x35bd4,
+ 0x36140, 0x3618c,
+ 0x361f0, 0x36200,
+ 0x36218, 0x36218,
+ 0x36400, 0x36400,
+ 0x36408, 0x3641c,
+ 0x36618, 0x36620,
+ 0x36664, 0x36664,
+ 0x366a8, 0x366a8,
+ 0x366ec, 0x366ec,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b78,
+ 0x36c00, 0x36c00,
+ 0x36c08, 0x36c3c,
+ 0x36e00, 0x36e2c,
+ 0x36f00, 0x36f2c,
+ 0x37000, 0x370ac,
+ 0x370c0, 0x371ac,
+ 0x371c0, 0x372c4,
+ 0x372e4, 0x373c4,
+ 0x373e4, 0x374ac,
+ 0x374c0, 0x375ac,
+ 0x375c0, 0x376c4,
+ 0x376e4, 0x377c4,
+ 0x377e4, 0x377fc,
+ 0x37814, 0x37814,
+ 0x37854, 0x37868,
+ 0x37880, 0x3788c,
+ 0x378c0, 0x378d0,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x379ac,
+ 0x379c0, 0x37ac4,
+ 0x37ae4, 0x37b10,
+ 0x37b24, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c24, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x40040, 0x40040,
+ 0x40080, 0x40084,
+ 0x40100, 0x40100,
+ 0x40140, 0x401bc,
+ 0x40200, 0x40214,
+ 0x40228, 0x40228,
+ 0x40240, 0x40258,
+ 0x40280, 0x40280,
+ 0x40304, 0x40304,
+ 0x40330, 0x4033c,
+ 0x41304, 0x413dc,
+ 0x41400, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x4407c,
+ 0x440c0, 0x4427c,
+ 0x442c0, 0x4447c,
+ 0x444c0, 0x4467c,
+ 0x446c0, 0x4487c,
+ 0x448c0, 0x44a7c,
+ 0x44ac0, 0x44c7c,
+ 0x44cc0, 0x44e7c,
+ 0x44ec0, 0x4507c,
+ 0x450c0, 0x451fc,
+ 0x45800, 0x45868,
+ 0x45880, 0x45884,
+ 0x458a0, 0x458b0,
+ 0x45a00, 0x45a68,
+ 0x45a80, 0x45a84,
+ 0x45aa0, 0x45ab0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x4782c,
+ 0x50000, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x510b0,
+ 0x51300, 0x51324,
+ };
+
u32 *buf_end = (u32 *)((char *)buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -1328,6 +1678,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
break;
+ case CHELSIO_T6:
+ reg_ranges = t6_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ break;
+
default:
dev_err(adap->pdev_dev,
"Unsupported chip version %d\n", chip_version);
@@ -1374,17 +1729,16 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
}
/**
- * get_vpd_params - read VPD parameters from VPD EEPROM
+ * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
* @adapter: adapter to read
* @p: where to store the parameters
*
* Reads card parameters stored in VPD EEPROM.
*/
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- u32 cclk_param, cclk_val;
- int i, ret, addr;
- int ec, sn, pn;
+ int i, ret = 0, addr;
+ int ec, sn, pn, na;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -1392,6 +1746,9 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
+ /* Card information normally starts at VPD_BASE but early cards had
+ * it at 0.
+ */
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
if (ret < 0)
goto out;
@@ -1457,6 +1814,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
FIND_VPD_KW(pn, "PN");
+ FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -1469,18 +1827,42 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->pn, vpd + pn, min(i, PN_LEN));
strim(p->pn);
+ memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+ strim((char *)p->na);
- /*
- * Ask firmware for the Core Clock since it knows how to translate the
+out:
+ vfree(vpd);
+ return ret;
+}
+
+/**
+ * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM and retrieves the Core
+ * Clock. This can only be called after a connection to the firmware
+ * is established.
+ */
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ u32 cclk_param, cclk_val;
+ int ret;
+
+ /* Grab the raw VPD parameters.
+ */
+ ret = t4_get_raw_vpd_params(adapter, p);
+ if (ret)
+ return ret;
+
+ /* Ask firmware for the Core Clock since it knows how to translate the
* Reference Clock ('V2') VPD field into a Core Clock value ...
*/
cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
- ret = t4_query_params(adapter, adapter->mbox, 0, 0,
+ ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
1, &cclk_param, &cclk_val);
-out:
- vfree(vpd);
if (ret)
return ret;
p->cclk = cclk_val;
@@ -1618,7 +2000,7 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
if (ret)
return ret;
if (byte_oriented)
- *data = (__force __u32) (htonl(*data));
+ *data = (__force __u32)(cpu_to_be32(*data));
}
return 0;
}
@@ -1941,7 +2323,8 @@ static bool t4_fw_matches_chip(const struct adapter *adap,
* which will keep us "honest" in the future ...
*/
if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
- (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
+ (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
+ (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
return true;
dev_err(adap->pdev_dev,
@@ -1979,7 +2362,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
"FW image size not multiple of 512 bytes\n");
return -EINVAL;
}
- if (ntohs(hdr->len512) * 512 != size) {
+ if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
dev_err(adap->pdev_dev,
"FW image size differs from size in FW header\n");
return -EINVAL;
@@ -1993,7 +2376,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
return -EINVAL;
for (csum = 0, i = 0; i < size / sizeof(csum); i++)
- csum += ntohl(p[i]);
+ csum += be32_to_cpu(p[i]);
if (csum != 0xffffffff) {
dev_err(adap->pdev_dev,
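The checksum loop a few lines above encodes t4_load_fw()'s image-integrity rule: summing every 32-bit big-endian word of the image, the header's own checksum field included, must yield 0xffffffff with wraparound. A self-contained check of the same rule:

	#include <stddef.h>
	#include <stdint.h>

	static int fw_image_csum_ok(const uint8_t *img, size_t size)
	{
		uint32_t csum = 0;
		size_t i;

		for (i = 0; i + 4 <= size; i += 4)
			csum += (uint32_t)img[i] << 24 |
				(uint32_t)img[i + 1] << 16 |
				(uint32_t)img[i + 2] << 8 |
				(uint32_t)img[i + 3];
		return csum == 0xffffffff;
	}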
@@ -2012,7 +2395,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
* first page with a bad version.
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
- ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
@@ -2039,6 +2422,147 @@ out:
}
/**
+ * t4_phy_fw_ver - return current PHY firmware version
+ * @adap: the adapter
+ * @phy_fw_ver: return value buffer for PHY firmware version
+ *
+ * Returns the current version of external PHY firmware on the
+ * adapter.
+ */
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
+{
+ u32 param, val;
+ int ret;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+ FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret < 0)
+ return ret;
+ *phy_fw_ver = val;
+ return 0;
+}
+
+/**
+ * t4_load_phy_fw - download port PHY firmware
+ * @adap: the adapter
+ * @win: the PCI-E Memory Window index to use for t4_memory_rw()
+ * @win_lock: the lock to use to guard the memory copy
+ * @phy_fw_version: function to check PHY firmware versions
+ * @phy_fw_data: the PHY firmware image to write
+ * @phy_fw_size: image size
+ *
+ * Transfer the specified PHY firmware to the adapter. If a non-NULL
+ * @phy_fw_version is supplied, then it will be used to determine if
+ * it's necessary to perform the transfer by comparing the version
+ * of any existing adapter PHY firmware with that of the passed in
+ * PHY firmware image. If @win_lock is non-NULL then it will be used
+ * around the call to t4_memory_rw() which transfers the PHY firmware
+ * to the adapter.
+ *
+ * A negative error number will be returned if an error occurs. If
+ * version number support is available and there's no need to upgrade
+ * the firmware, 0 will be returned. If firmware is successfully
+ * transferred to the adapter, 1 will be returned.
+ *
+ * NOTE: some adapters only have local RAM to store the PHY firmware. As
+ * a result, a RESET of the adapter would cause that RAM to lose its
+ * contents. Thus, loading PHY firmware on such adapters must happen
+ * after any FW_RESET_CMDs ...
+ */
+int t4_load_phy_fw(struct adapter *adap,
+ int win, spinlock_t *win_lock,
+ int (*phy_fw_version)(const u8 *, size_t),
+ const u8 *phy_fw_data, size_t phy_fw_size)
+{
+ unsigned long mtype = 0, maddr = 0;
+ u32 param, val;
+ int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
+ int ret;
+
+ /* If we have version number support, then check to see if the adapter
+ * already has up-to-date PHY firmware loaded.
+ */
+ if (phy_fw_version) {
+ new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
+ ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+ if (ret < 0)
+ return ret;
+
+ if (cur_phy_fw_ver >= new_phy_fw_vers) {
+ CH_WARN(adap, "PHY Firmware already up-to-date, "
+ "version %#x\n", cur_phy_fw_ver);
+ return 0;
+ }
+ }
+
+ /* Ask the firmware where it wants us to copy the PHY firmware image.
+	 * The size of the file requires a special version of the READ command
+	 * which will pass the file size via the values field in PARAMS_CMD,
+	 * then retrieve the return value from firmware and place it in the
+	 * same buffer values field.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+ FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+ val = phy_fw_size;
+ ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val, 1);
+ if (ret < 0)
+ return ret;
+ mtype = val >> 8;
+ maddr = (val & 0xff) << 16;
+
+ /* Copy the supplied PHY Firmware image to the adapter memory location
+ * allocated by the adapter firmware.
+ */
+ if (win_lock)
+ spin_lock_bh(win_lock);
+ ret = t4_memory_rw(adap, win, mtype, maddr,
+ phy_fw_size, (__be32 *)phy_fw_data,
+ T4_MEMORY_WRITE);
+ if (win_lock)
+ spin_unlock_bh(win_lock);
+ if (ret)
+ return ret;
+
+ /* Tell the firmware that the PHY firmware image has been written to
+ * RAM and it can now start copying it over to the PHYs. The chip
+ * firmware will RESET the affected PHYs as part of this operation
+ * leaving them running the new PHY firmware image.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+ FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+ ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val, 30000);
+
+ /* If we have version number support, then check to see that the new
+ * firmware got loaded properly.
+ */
+ if (phy_fw_version) {
+ ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+ if (ret < 0)
+ return ret;
+
+ if (cur_phy_fw_ver != new_phy_fw_vers) {
+ CH_WARN(adap, "PHY Firmware did not update: "
+ "version on adapter %#x, "
+ "version flashed %#x\n",
+ cur_phy_fw_ver, new_phy_fw_vers);
+ return -ENXIO;
+ }
+ }
+
+ return 1;
+}
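A note on the mtype/maddr decode inside t4_load_phy_fw() above: the firmware's single 32-bit reply packs the memory type in bits 15:8 and the destination address, in 64 KB units, in bits 7:0, hence the val >> 8 and (val & 0xff) << 16. A tiny runnable decode with an assumed reply value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int val = 0x0203;	/* hypothetical FW reply */
		unsigned long mtype = val >> 8;		  /* memory type: 2 */
		unsigned long maddr = (val & 0xff) << 16; /* offset: 0x30000 */

		printf("mtype=%lu maddr=%#lx\n", mtype, maddr);
		return 0;
	}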
+
+/**
* t4_fwcache - firmware cache operation
* @adap: the adapter
* @op : the operation (flush or flush and invalidate)
@@ -2051,7 +2575,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.op_to_vfn =
cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
- FW_PARAMS_CMD_PFN_V(adap->fn) |
+ FW_PARAMS_CMD_PFN_V(adap->pf) |
FW_PARAMS_CMD_VFN_V(0));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.param[0].mnem =
@@ -2082,7 +2606,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
FW_PORT_CAP_ANEG)
/**
- * t4_link_start - apply link configuration to MAC/PHY
+ * t4_link_l1cfg - apply link configuration to MAC/PHY
* @phy: the PHY to setup
* @mac: the MAC to setup
* @lc: the requested link configuration
@@ -2094,7 +2618,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
* - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
* otherwise do it later based on the outcome of auto-negotiation.
*/
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
@@ -2107,19 +2631,22 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
fc |= FW_PORT_CAP_FC_TX;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
- FW_LEN16(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
- c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+ c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
+ fc);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else if (lc->autoneg == AUTONEG_DISABLE) {
- c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else
- c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -2137,11 +2664,13 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
- FW_LEN16(c));
- c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+ c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -2335,6 +2864,7 @@ static void tp_intr_handler(struct adapter *adapter)
static void sge_intr_handler(struct adapter *adapter)
{
u64 v;
+ u32 err;
static const struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE_F,
@@ -2343,8 +2873,6 @@ static void sge_intr_handler(struct adapter *adapter)
"SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
- { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
- { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
@@ -2357,13 +2885,19 @@ static void sge_intr_handler(struct adapter *adapter)
0 },
{ ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
- { ERR_EGR_CTXT_PRIO_F,
- "SGE too many priority egress contexts", -1, 0 },
{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0 }
};
+ static struct intr_info t4t5_sge_intr_info[] = {
+ { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+ { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+ { ERR_EGR_CTXT_PRIO_F,
+ "SGE too many priority egress contexts", -1, 0 },
+ { 0 }
+ };
+
v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
if (v) {
@@ -2373,8 +2907,23 @@ static void sge_intr_handler(struct adapter *adapter)
t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
}
- if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
- v != 0)
+ v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
+ t4t5_sge_intr_info);
+
+ err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
+ if (err & ERROR_QID_VALID_F) {
+ dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
+ ERROR_QID_G(err));
+ if (err & UNCAPTURED_ERROR_F)
+ dev_err(adapter->pdev_dev,
+ "SGE UNCAPTURED_ERROR set (clearing)\n");
+ t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
+ UNCAPTURED_ERROR_F);
+ }
+
+ if (v != 0)
t4_fatal_err(adapter);
}
@@ -2547,6 +3096,7 @@ static void cplsw_intr_handler(struct adapter *adapter)
*/
static void le_intr_handler(struct adapter *adap)
{
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
static const struct intr_info le_intr_info[] = {
{ LIPMISS_F, "LE LIP miss", -1, 0 },
{ LIP0_F, "LE 0 LIP error", -1, 0 },
@@ -2556,7 +3106,18 @@ static void le_intr_handler(struct adapter *adap)
{ 0 }
};
- if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
+ static struct intr_info t6_le_intr_info[] = {
+ { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
+ { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { TCAMINTPERR_F, "LE parity error", -1, 1 },
+ { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
+ (chip <= CHELSIO_T5) ?
+ le_intr_info : t6_le_intr_info))
t4_fatal_err(adap);
}
@@ -2825,7 +3386,7 @@ int t4_slow_intr_handler(struct adapter *adapter)
pcie_intr_handler(adapter);
if (cause & MC_F)
mem_intr_handler(adapter, MEM_MC);
- if (!is_t4(adapter->params.chip) && (cause & MC1_S))
+ if (is_t5(adapter->params.chip) && (cause & MC1_F))
mem_intr_handler(adapter, MEM_MC1);
if (cause & EDC0_F)
mem_intr_handler(adapter, MEM_EDC0);
@@ -2871,17 +3432,18 @@ int t4_slow_intr_handler(struct adapter *adapter)
*/
void t4_intr_enable(struct adapter *adapter)
{
+ u32 val = 0;
u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
- ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+ ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
- ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
- EGRESS_SIZE_ERR_F);
+ DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
@@ -2945,18 +3507,18 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
struct fw_rss_ind_tbl_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_RSS_IND_TBL_CMD_VIID_V(viid));
- cmd.retval_len16 = htonl(FW_LEN16(cmd));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
while (n > 0) {
int nq = min(n, 32);
__be32 *qp = &cmd.iq0_to_iq2;
- cmd.niqid = htons(nq);
- cmd.startidx = htons(start);
+ cmd.niqid = cpu_to_be16(nq);
+ cmd.startidx = cpu_to_be16(start);
start += nq;
n -= nq;
@@ -2974,7 +3536,7 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
if (++rsp >= rsp_end)
rsp = rspq;
- *qp++ = htonl(v);
+ *qp++ = cpu_to_be32(v);
nq -= 3;
}
@@ -3000,20 +3562,46 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
struct fw_rss_glb_config_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- c.retval_len16 = htonl(FW_LEN16(c));
+ c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
- c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+ c.u.manual.mode_pkd =
+ cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
c.u.basicvirtual.mode_pkd =
- htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
- c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+ cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+ c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
} else
return -EINVAL;
return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
+/**
+ * t4_config_vi_rss - configure per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: RSS flags
+ * @defq: id of the default RSS queue for the VI.
+ *
+ * Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq)
+{
+ struct fw_rss_vi_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
+ FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
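A minimal usage sketch for the new helper: enable IPv4 hashing on a VI and
steer non-matching traffic to its default queue. The
FW_RSS_VI_CONFIG_CMD_*EN_F flag names and pi->first_qset are assumptions,
shown only to illustrate how @flags and @defq combine:

    /* Hedged sketch; flag names assumed from the fw_rss_vi_config_cmd API */
    ret = t4_config_vi_rss(adap, adap->mbox, pi->viid,
                           FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
                           FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F,
                           pi->first_qset);
    if (ret)
        dev_warn(adap->pdev_dev, "VI RSS config failed: %d\n", ret);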
+
/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
@@ -3045,6 +3633,40 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
}
/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw)
+{
+ int ret, i;
+ int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ struct fw_ldst_cmd c;
+
+ for (i = 0; i < nregs; i++) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ (rw ? FW_CMD_READ_F :
+ FW_CMD_WRITE_F) |
+ FW_LDST_CMD_ADDRSPACE_V(cmd));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+ c.u.addrval.addr = cpu_to_be32(start_index + i);
+ c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (!ret && rw)
+ vals[i] = be32_to_cpu(c.u.addrval.val);
+ }
+}
+
+/**
* t4_read_rss_key - read the global RSS key
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
@@ -3053,8 +3675,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
*/
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
+ if (adap->flags & FW_OK)
+ t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
+ else
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
}
/**
@@ -3069,11 +3694,32 @@ void t4_read_rss_key(struct adapter *adap, u32 *key)
*/
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
{
- t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
- if (idx >= 0 && idx < 16)
- t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
- KEYWRADDR_V(idx) | KEYWREN_F);
+ u8 rss_key_addr_cnt = 16;
+ u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
+
+ /* On T6 and later, KeyMode 3 (per-VF and per-VF scramble) allows
+ * access to key addresses 16-63 by using KeyWrAddrX as bits [5:4]
+ * (the upper two bits) of the index into the key table.
+ */
+ if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+ (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
+ rss_key_addr_cnt = 32;
+
+ if (adap->flags & FW_OK)
+ t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
+ else
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
+
+ if (idx >= 0 && idx < rss_key_addr_cnt) {
+ if (rss_key_addr_cnt > 16)
+ t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+ KEYWRADDRX_V(idx >> 4) |
+ T6_VFWRADDR_V(idx) | KEYWREN_F);
+ else
+ t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+ KEYWRADDR_V(idx) | KEYWREN_F);
+ }
}
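A worked example of the KeyMode-3 indexing above:

    /* idx = 37: KEYWRADDRX_V(37 >> 4) supplies bits [5:4] = 2 and
     * T6_VFWRADDR_V(37) the low bits, selecting key address
     * (2 << 4) | 5 == 37 in the extended 0-63 range. On T4/T5 the
     * same idx is skipped, since only addresses 0-15 are writable.
     */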
/**
@@ -3088,8 +3734,12 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp)
{
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- valp, 1, TP_RSS_PF0_CONFIG_A + index);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, valp, 1,
+ TP_RSS_PF0_CONFIG_A + index, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ valp, 1, TP_RSS_PF0_CONFIG_A + index);
}
/**
@@ -3107,8 +3757,13 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
{
u32 vrt, mask, data;
- mask = VFWRADDR_V(VFWRADDR_M);
- data = VFWRADDR_V(index);
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+ mask = VFWRADDR_V(VFWRADDR_M);
+ data = VFWRADDR_V(index);
+ } else {
+ mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
+ data = T6_VFWRADDR_V(index);
+ }
/* Request that the index'th VF Table values be read into VFL/VFH.
*/
@@ -3119,10 +3774,15 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
/* Grab the VFL/VFH values ...
*/
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfl, 1, TP_RSS_VFL_CONFIG_A);
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfh, 1, TP_RSS_VFH_CONFIG_A);
+ if (adapter->flags & FW_OK) {
+ t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
+ t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
+ } else {
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfl, 1, TP_RSS_VFL_CONFIG_A);
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfh, 1, TP_RSS_VFH_CONFIG_A);
+ }
}
/**
@@ -3135,8 +3795,11 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
{
u32 pfmap;
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmap, 1, TP_RSS_PF_MAP_A);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmap, 1, TP_RSS_PF_MAP_A);
return pfmap;
}
@@ -3150,8 +3813,11 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
u32 pfmask;
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmask, 1, TP_RSS_PF_MSK_A);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmask, 1, TP_RSS_PF_MSK_A);
return pfmask;
}
@@ -3176,18 +3842,18 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
if (v4) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
- v4->tcpOutRsts = STAT(OUT_RST);
- v4->tcpInSegs = STAT64(IN_SEG);
- v4->tcpOutSegs = STAT64(OUT_SEG);
- v4->tcpRetransSegs = STAT64(RXT_SEG);
+ v4->tcp_out_rsts = STAT(OUT_RST);
+ v4->tcp_in_segs = STAT64(IN_SEG);
+ v4->tcp_out_segs = STAT64(OUT_SEG);
+ v4->tcp_retrans_segs = STAT64(RXT_SEG);
}
if (v6) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
- v6->tcpOutRsts = STAT(OUT_RST);
- v6->tcpInSegs = STAT64(IN_SEG);
- v6->tcpOutSegs = STAT64(OUT_SEG);
- v6->tcpRetransSegs = STAT64(RXT_SEG);
+ v6->tcp_out_rsts = STAT(OUT_RST);
+ v6->tcp_in_segs = STAT64(IN_SEG);
+ v6->tcp_out_segs = STAT64(OUT_SEG);
+ v6->tcp_retrans_segs = STAT64(RXT_SEG);
}
#undef STAT64
#undef STAT
@@ -3195,6 +3861,130 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
}
/**
+ * t4_tp_get_err_stats - read TP's error MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+ /* T4/T5 have 4 channels (NCHAN); T6 and later have 2 */
+ if (adap->params.arch.nchan == NCHAN) {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, 8,
+ TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, 4,
+ TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, 4,
+ TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, 4,
+ TP_MIB_TCP_V6IN_ERR_0_A);
+ } else {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, 2,
+ TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_chan_drops, 2,
+ TP_MIB_OFD_CHN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, 2,
+ TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
+ }
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
+}
+
+/**
+ * t4_tp_get_cpl_stats - read TP's CPL MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's CPL counters.
+ */
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+{
+ /* T4/T5 have 4 channels (NCHAN); T6 and later have 2 */
+ if (adap->params.arch.nchan == NCHAN) {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ 8, TP_MIB_CPL_IN_REQ_0_A);
+ } else {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ 2, TP_MIB_CPL_IN_REQ_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+ 2, TP_MIB_CPL_OUT_RSP_0_A);
+ }
+}
+
+/**
+ * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's RDMA counters.
+ */
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+{
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
+ 2, TP_MIB_RQE_DFR_PKT_A);
+}
+
+/**
+ * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
+ * @adap: the adapter
+ * @idx: the port index
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's FCoE counters for the selected port.
+ */
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st)
+{
+ u32 val[2];
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
+ 1, TP_MIB_FCOE_DDP_0_A + idx);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
+ 1, TP_MIB_FCOE_DROP_0_A + idx);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+ 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
+ st->octets_ddp = ((u64)val[0] << 32) | val[1];
+}
+
+/**
+ * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's counters for non-TCP directly-placed packets.
+ */
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+{
+ u32 val[4];
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
+ TP_MIB_USM_PKTS_A);
+ st->frames = val[0];
+ st->drops = val[1];
+ st->octets = ((u64)val[2] << 32) | val[3];
+}
+
+/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
* @adap: the adapter
* @mtus: where to store the MTU values
@@ -3401,7 +4191,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
}
/**
- * get_mps_bg_map - return the buffer groups associated with a port
+ * t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
*
@@ -3409,7 +4199,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
* with the given port. Bit i is set if buffer group i is used by the
* port.
*/
-static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
@@ -3451,6 +4241,28 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
}
/**
+ * t4_get_port_stats_offset - collect port stats relative to a previous
+ * snapshot
+ * @adap: The adapter
+ * @idx: The port
+ * @stats: Current stats to fill
+ * @offset: Previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset)
+{
+ u64 *s, *o;
+ int i;
+
+ t4_get_port_stats(adap, idx, stats);
+ for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+ i < (sizeof(struct port_stats) / sizeof(u64));
+ i++, s++, o++)
+ *s -= *o;
+}
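The pointer walk above is valid only because struct port_stats is a flat
array of u64 counters. A hedged usage sketch: snapshot at interface
bring-up, read deltas later (the stats_base field is illustrative):

    /* at open time: record a baseline snapshot */
    t4_get_port_stats(adap, pi->port_id, &pi->stats_base);

    /* later: counters accumulated since that baseline */
    struct port_stats delta;

    t4_get_port_stats_offset(adap, pi->port_id, &delta, &pi->stats_base);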
+
+/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
@@ -3460,7 +4272,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
*/
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
- u32 bgmap = get_mps_bg_map(adap, idx);
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
t4_read_reg64(adap, \
@@ -3534,103 +4346,51 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
}
/**
- * t4_wol_magic_enable - enable/disable magic packet WoL
- * @adap: the adapter
- * @port: the physical port index
- * @addr: MAC address expected in magic packets, %NULL to disable
- *
- * Enables/disables magic packet wake-on-LAN for the selected port.
- */
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
- const u8 *addr)
-{
- u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
-
- if (is_t4(adap->params.chip)) {
- mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
- mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
- } else {
- mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
- mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
- }
-
- if (addr) {
- t4_write_reg(adap, mag_id_reg_l,
- (addr[2] << 24) | (addr[3] << 16) |
- (addr[4] << 8) | addr[5]);
- t4_write_reg(adap, mag_id_reg_h,
- (addr[0] << 8) | addr[1]);
- }
- t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
- addr ? MAGICEN_F : 0);
-}
-
-/**
- * t4_wol_pat_enable - enable/disable pattern-based WoL
+ * t4_get_lb_stats - collect loopback port statistics
* @adap: the adapter
- * @port: the physical port index
- * @map: bitmap of which HW pattern filters to set
- * @mask0: byte mask for bytes 0-63 of a packet
- * @mask1: byte mask for bytes 64-127 of a packet
- * @crc: Ethernet CRC for selected bytes
- * @enable: enable/disable switch
+ * @idx: the loopback port index
+ * @p: the stats structure to fill
*
- * Sets the pattern filters indicated in @map to mask out the bytes
- * specified in @mask0/@mask1 in received packets and compare the CRC of
- * the resulting packet against @crc. If @enable is %true pattern-based
- * WoL is enabled, otherwise disabled.
+ * Returns HW statistics for the given loopback port.
*/
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
- u64 mask0, u64 mask1, unsigned int crc, bool enable)
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
- int i;
- u32 port_cfg_reg;
-
- if (is_t4(adap->params.chip))
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
- else
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
- if (!enable) {
- t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
- return 0;
- }
- if (map > 0xff)
- return -EINVAL;
-
-#define EPIO_REG(name) \
+#define GET_STAT(name) \
+ t4_read_reg64(adap, \
(is_t4(adap->params.chip) ? \
- PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
- T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
-
- t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
- t4_write_reg(adap, EPIO_REG(DATA2), mask1);
- t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
-
- for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
- if (!(map & 1))
- continue;
+ PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
+ T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
- /* write byte masks */
- t4_write_reg(adap, EPIO_REG(DATA0), mask0);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
- t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
- return -ETIMEDOUT;
-
- /* write CRC */
- t4_write_reg(adap, EPIO_REG(DATA0), crc);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
- t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
- return -ETIMEDOUT;
- }
-#undef EPIO_REG
+ p->octets = GET_STAT(BYTES);
+ p->frames = GET_STAT(FRAMES);
+ p->bcast_frames = GET_STAT(BCAST);
+ p->mcast_frames = GET_STAT(MCAST);
+ p->ucast_frames = GET_STAT(UCAST);
+ p->error_frames = GET_STAT(ERROR);
+
+ p->frames_64 = GET_STAT(64B);
+ p->frames_65_127 = GET_STAT(65B_127B);
+ p->frames_128_255 = GET_STAT(128B_255B);
+ p->frames_256_511 = GET_STAT(256B_511B);
+ p->frames_512_1023 = GET_STAT(512B_1023B);
+ p->frames_1024_1518 = GET_STAT(1024B_1518B);
+ p->frames_1519_max = GET_STAT(1519B_MAX);
+ p->drop = GET_STAT(DROP_FRAMES);
+
+ p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+ p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+ p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+ p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+ p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+ p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+ p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+ p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
- return 0;
+#undef GET_STAT
+#undef GET_STAT_COM
}
/* t4_mk_filtdelwr - create a delete filter WR
@@ -3644,33 +4404,38 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
memset(wr, 0, sizeof(*wr));
- wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
- wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
- wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
- FW_FILTER_WR_NOREPLY_V(qid < 0));
- wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
+ wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
+ wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
+ wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
+ FW_FILTER_WR_NOREPLY_V(qid < 0));
+ wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
if (qid >= 0)
- wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
+ wr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}
#define INIT_CMD(var, cmd, rd_wr) do { \
- (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
- FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
- (var).retval_len16 = htonl(FW_LEN16(var)); \
+ (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
+ FW_CMD_REQUEST_F | \
+ FW_CMD_##rd_wr##_F); \
+ (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val)
{
+ u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F |
- FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
- c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.addrval.addr = htonl(addr);
- c.u.addrval.val = htonl(val);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.addrval.addr = cpu_to_be32(addr);
+ c.u.addrval.val = cpu_to_be32(val);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3690,19 +4455,22 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp)
{
int ret;
+ u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
- c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
- FW_LDST_CMD_MMD_V(mmd));
- c.u.mdio.raddr = htons(reg);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
+ c.u.mdio.raddr = cpu_to_be16(reg);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0)
- *valp = ntohs(c.u.mdio.rval);
+ *valp = be16_to_cpu(c.u.mdio.rval);
return ret;
}
@@ -3720,16 +4488,19 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val)
{
+ u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
- c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
- FW_LDST_CMD_MMD_V(mmd));
- c.u.mdio.raddr = htons(reg);
- c.u.mdio.rval = htons(val);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
+ c.u.mdio.raddr = cpu_to_be16(reg);
+ c.u.mdio.rval = cpu_to_be16(val);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3841,6 +4612,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
}
/**
+ * t4_sge_ctxt_flush - flush the SGE context cache
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a FW command through the given mailbox to flush the
+ * SGE context cache.
+ */
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+{
+ int ret;
+ u32 ldst_addrspace;
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ return ret;
+}
+
+/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -3863,11 +4660,11 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
- c.err_to_clearinit = htonl(
+ c.err_to_clearinit = cpu_to_be32(
FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
- FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
- FW_HELLO_CMD_MBMASTER_M) |
+ FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
+ mbox : FW_HELLO_CMD_MBMASTER_M) |
FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
FW_HELLO_CMD_CLEARINIT_F);
@@ -3888,7 +4685,7 @@ retry:
return ret;
}
- v = ntohl(c.err_to_clearinit);
+ v = be32_to_cpu(c.err_to_clearinit);
master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
if (state) {
if (v & FW_HELLO_CMD_ERR_F)
@@ -4017,7 +4814,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
- c.val = htonl(reset);
+ c.val = cpu_to_be32(reset);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4050,8 +4847,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
- c.val = htonl(PIORST_F | PIORSTMODE_F);
- c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
+ c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
+ c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4190,7 +4987,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
* the newly loaded firmware will handle this right by checking
* its header flags to see if it advertises the capability.
*/
- reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+ reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
return t4_fw_restart(adap, mbox, reset);
}
@@ -4321,7 +5118,7 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
}
/**
- * t4_query_params - query FW or device parameters
+ * t4_query_params_rw - query FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF
@@ -4329,13 +5126,14 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
* @nparams: the number of parameters
* @params: the parameter names
* @val: the parameter values
+ * @rw: Write and read flag; when set, @val also supplies the values to write
*
* Reads the value of FW or device parameters. Up to 7 parameters can be
* queried at once.
*/
-int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
- unsigned int vf, unsigned int nparams, const u32 *params,
- u32 *val)
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val, int rw)
{
int i, ret;
struct fw_params_cmd c;
@@ -4345,22 +5143,35 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
- FW_PARAMS_CMD_VFN_V(vf));
- c.retval_len16 = htonl(FW_LEN16(c));
- for (i = 0; i < nparams; i++, p += 2)
- *p = htonl(*params++);
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+ for (i = 0; i < nparams; i++) {
+ *p++ = cpu_to_be32(*params++);
+ if (rw)
+ *p = cpu_to_be32(*(val + i));
+ p++;
+ }
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0)
for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
- *val++ = ntohl(*p);
+ *val++ = be32_to_cpu(*p);
return ret;
}
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
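Read-only callers keep using the t4_query_params() wrapper. For example,
fetching a single device parameter; the FW_PARAMS_PARAM_DEV_FWREV mnemonic is
an assumption, used only to illustrate the mnem/X encoding seen at the top of
this patch:

    u32 param, val;
    int ret;

    param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
            FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV);
    ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
    if (ret == 0)
        dev_info(adap->pdev_dev, "device parameter: %#x\n", val);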
+
/**
- * t4_set_params_nosleep - sets FW or device parameters
+ * t4_set_params_timeout - sets FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF
@@ -4368,15 +5179,15 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
* @nparams: the number of parameters
* @params: the parameter names
* @val: the parameter values
+ * @timeout: the mailbox command timeout in milliseconds
*
- * Does not ever sleep
* Sets the value of FW or device parameters. Up to 7 parameters can be
* specified at once.
*/
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int nparams, const u32 *params,
- const u32 *val)
+ const u32 *val, int timeout)
{
struct fw_params_cmd c;
__be32 *p = &c.param[0].mnem;
@@ -4386,9 +5197,9 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
- FW_PARAMS_CMD_PFN_V(pf) |
- FW_PARAMS_CMD_VFN_V(vf));
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
while (nparams--) {
@@ -4396,7 +5207,7 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
*p++ = cpu_to_be32(*val++);
}
- return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}
/**
@@ -4416,23 +5227,8 @@ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
const u32 *val)
{
- struct fw_params_cmd c;
- __be32 *p = &c.param[0].mnem;
-
- if (nparams > 7)
- return -EINVAL;
-
- memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
- FW_PARAMS_CMD_VFN_V(vf));
- c.retval_len16 = htonl(FW_LEN16(c));
- while (nparams--) {
- *p++ = htonl(*params++);
- *p++ = htonl(*val++);
- }
-
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+ FW_CMD_MAX_TIMEOUT);
}
/**
@@ -4465,20 +5261,21 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_pfvf_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
- FW_PFVF_CMD_VFN_V(vf));
- c.retval_len16 = htonl(FW_LEN16(c));
- c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
- FW_PFVF_CMD_NIQ_V(rxq));
- c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
- FW_PFVF_CMD_PMASK_V(pmask) |
- FW_PFVF_CMD_NEQ_V(txq));
- c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
- FW_PFVF_CMD_NEXACTF_V(nexact));
- c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
- FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
- FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
+ FW_PFVF_CMD_VFN_V(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
+ FW_PFVF_CMD_NIQ_V(rxq));
+ c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
+ FW_PFVF_CMD_PMASK_V(pmask) |
+ FW_PFVF_CMD_NEQ_V(txq));
+ c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
+ FW_PFVF_CMD_NVI_V(vi) |
+ FW_PFVF_CMD_NEXACTF_V(nexact));
+ c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
+ FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4507,10 +5304,10 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
struct fw_vi_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
c.portid_pkd = FW_VI_CMD_PORTID_V(port);
c.nmac = nmac - 1;
@@ -4532,8 +5329,35 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
}
if (rss_size)
- *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
- return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
+ *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+ return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_VI_CMD_PFN_V(pf) |
+ FW_VI_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
+ c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}
/**
@@ -4569,14 +5393,16 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
- c.retval_len16 = htonl(FW_LEN16(c));
- c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
- FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_RXMODE_CMD_VIID_V(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.mtu_to_vlanexen =
+ cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -4606,43 +5432,71 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
- int i, ret;
+ int offset, ret = 0;
struct fw_vi_mac_cmd c;
- struct fw_vi_mac_exact *p;
- unsigned int max_naddr = is_t4(adap->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int nfilters = 0;
+ unsigned int max_naddr = adap->params.arch.mps_tcam_size;
+ unsigned int rem = naddr;
- if (naddr > 7)
+ if (naddr > max_naddr)
return -EINVAL;
- memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
- FW_VI_MAC_CMD_VIID_V(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
- FW_CMD_LEN16_V((naddr + 2) / 2));
-
- for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
- FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
- memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
- }
+ for (offset = 0; offset < naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
+ rem : ARRAY_SIZE(c.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
- ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
- if (ret)
- return ret;
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(free) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+ FW_CMD_LEN16_V(len16));
+
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx =
+ cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(
+ FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset + i],
+ sizeof(p->macaddr));
+ }
- for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
+ /* It's okay if we run out of space in our MAC address arena.
+ * Some of the addresses we submit may get stored, so we need
+ * to run through the reply to see what the results were ...
+ */
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret && ret != -FW_ENOMEM)
+ break;
- if (idx)
- idx[i] = index >= max_naddr ? 0xffff : index;
- if (index < max_naddr)
- ret++;
- else if (hash)
- *hash |= (1ULL << hash_mac_addr(addr[i]));
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_G(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[offset + i] = (index >= max_naddr ?
+ 0xffff : index);
+ if (index < max_naddr)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL <<
+ hash_mac_addr(addr[offset + i]));
+ }
+
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
}
+
+ if (ret == 0 || ret == -FW_ENOMEM)
+ ret = nfilters;
return ret;
}
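Note the reworked return convention: on success, including a benign
-FW_ENOMEM when the exact-match arena fills, the function now returns the
number of filters actually written, with overflow addresses recorded in
*hash. A hedged caller sketch using t4_set_addr_hash() from later in this
patch:

    u64 mhash = 0;
    int nfilt;

    nfilt = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, false,
                              naddr, addrs, NULL, &mhash, true);
    if (nfilt < 0)
        return nfilt;                 /* hard failure */
    if (nfilt < naddr)                /* overflow fell back to hash matching */
        t4_set_addr_hash(adap, adap->mbox, pi->viid, false, mhash, true);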
@@ -4671,26 +5525,25 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
- unsigned int max_mac_addr = is_t4(adap->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
- c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
- FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
- FW_VI_MAC_CMD_IDX_V(idx));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
+ FW_VI_MAC_CMD_IDX_V(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
- ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
+ ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
}
@@ -4714,11 +5567,12 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_mac_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
- FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
- FW_CMD_LEN16_V(1));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
+ FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+ FW_CMD_LEN16_V(1));
c.u.hash.hashvec = cpu_to_be64(vec);
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -4741,12 +5595,13 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
-
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
- FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
- FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+ FW_VI_ENABLE_CMD_EEN_V(tx_en) |
+ FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
+ FW_LEN16(c));
return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4781,10 +5636,11 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
- c.blinkdur = htons(nblinks);
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
+ c.blinkdur = cpu_to_be16(nblinks);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4808,14 +5664,14 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_iq_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
- FW_IQ_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
- c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
- c.iqid = htons(iqid);
- c.fl0id = htons(fl0id);
- c.fl1id = htons(fl1id);
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+ FW_IQ_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4835,11 +5691,12 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_eth_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
- FW_EQ_ETH_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_ETH_CMD_PFN_V(pf) |
+ FW_EQ_ETH_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4859,11 +5716,12 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ctrl_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
- FW_EQ_CTRL_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
- c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_CTRL_CMD_PFN_V(pf) |
+ FW_EQ_CTRL_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
+ c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4883,11 +5741,12 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ofld_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
- FW_EQ_OFLD_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(pf) |
+ FW_EQ_OFLD_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4905,11 +5764,11 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (opcode == FW_PORT_CMD) { /* link/module state change message */
int speed = 0, fc = 0;
const struct fw_port_cmd *p = (void *)rpl;
- int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
+ int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
int port = adap->chan_map[chan];
struct port_info *pi = adap2pinfo(adap, port);
struct link_config *lc = &pi->link_cfg;
- u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+ u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
@@ -5043,6 +5902,22 @@ static int get_flash_params(struct adapter *adap)
return 0;
}
+static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
+{
+ u16 val;
+ u32 pcie_cap;
+
+ pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ pci_read_config_word(adapter->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, &val);
+ val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
+ val |= range;
+ pci_write_config_word(adapter->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
+
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -5075,9 +5950,30 @@ int t4_prep_adapter(struct adapter *adapter)
switch (ver) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+ adapter->params.arch.sge_fl_db = DBPRIO_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
break;
case CHELSIO_T5:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 256;
+ adapter->params.arch.nchan = 2;
+ adapter->params.arch.vfcount = 256;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -5094,11 +5990,14 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.nports = 1;
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
+
+ /* Set the PCIe completion timeout to the 4s-13s range (encoding 0xd). */
+ set_pcie_completion_timeout(adapter, 0xd);
return 0;
}
/**
- * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
* @qtype: the Ingress or Egress type for @qid
@@ -5122,7 +6021,7 @@ int t4_prep_adapter(struct adapter *adapter)
* Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
*/
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
enum t4_bar2_qtype qtype,
u64 *pbar2_qoffset,
@@ -5154,7 +6053,7 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
* o The BAR2 Queue ID.
* o The BAR2 Queue ID Offset into the BAR2 page.
*/
- bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
bar2_qid = qid & qpp_mask;
bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
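The (u64) cast is the substantive fix in this hunk: without it the shift is
evaluated in 32-bit arithmetic and large page offsets silently truncate. A
worked example with illustrative shift values:

    /* e.g. qid = 0x30000, qpp_shift = 0, page_shift = 16:
     *
     *   0x30000 << 16 == 0x300000000 as a 64-bit value, but
     *                    0x0         when evaluated in 32 bits.
     *
     * Promoting the left operand to u64 before the shift keeps the
     * full offset.
     */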
@@ -5223,18 +6122,19 @@ int t4_init_devlog_params(struct adapter *adap)
 /* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
- devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+ devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
&devlog_cmd);
if (ret)
return ret;
- devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+ devlog_meminfo =
+ be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
- dparams->size = ntohl(devlog_cmd.memsize_devlog);
+ dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
return 0;
}
@@ -5255,13 +6155,13 @@ int t4_init_sge_params(struct adapter *adapter)
*/
hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
s_hps = (HOSTPAGESIZEPF0_S +
- (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
+ (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
 /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
*/
s_qpp = (QUEUESPERPAGEPF0_S +
- (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
@@ -5292,12 +6192,19 @@ int t4_init_tp_params(struct adapter *adap)
 /* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP_A);
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.ingress_config, 1,
- TP_INGRESS_CONFIG_A);
+ if (adap->flags & FW_OK) {
+ t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A, 1);
+ t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A, 1);
+ } else {
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A);
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A);
+ }
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
@@ -5373,6 +6280,29 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
return field_shift;
}
+int t4_init_rss_mode(struct adapter *adap, int mbox)
+{
+ int i, ret;
+ struct fw_rss_vi_config_cmd rvc;
+
+ memset(&rvc, 0, sizeof(rvc));
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ rvc.op_to_viid =
+ cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
+ rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
+ ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+ if (ret)
+ return ret;
+ p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
+ }
+ return 0;
+}
+
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
@@ -5390,10 +6320,10 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_PORT_CMD_PORTID_V(j));
- c.action_to_len16 = htonl(
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(j));
+ c.action_to_len16 = cpu_to_be32(
FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
FW_LEN16(c));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
@@ -5411,22 +6341,23 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
adap->port[i]->dev_port = j;
- ret = ntohl(c.u.info.lstatus_to_modtype);
+ ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
FW_PORT_CMD_MDIOADDR_G(ret) : -1;
p->port_type = FW_PORT_CMD_PTYPE_G(ret);
p->mod_type = FW_PORT_MOD_TYPE_NA;
- rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
- rvc.retval_len16 = htonl(FW_LEN16(rvc));
+ rvc.op_to_viid =
+ cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
+ rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
if (ret)
return ret;
- p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
+ p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
- init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+ init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
j++;
}
return 0;
@@ -5717,3 +6648,130 @@ void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
cfg | adap->params.tp.la_mask);
}
+
+/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
+ * seconds). If we find one of the SGE Ingress DMA State Machines in the same
+ * state for more than the Warning Threshold then we'll issue a warning about
+ * a potential hang. While the SGE Ingress DMA Channel appears to be hung,
+ * we'll repeat the warning every Warning Repeat seconds until the situation
+ * clears. If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH 1
+#define SGE_IDMA_WARN_REPEAT 300
+
+/**
+ * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
+ * @adapter: the adapter
+ * @idma: the adapter IDMA Monitor state
+ *
+ * Initialize the state of an SGE Ingress DMA Monitor.
+ */
+void t4_idma_monitor_init(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma)
+{
+ /* Initialize the state variables for detecting an SGE Ingress DMA
+ * hang. The SGE has internal counters which count up on each clock
+ * tick whenever the SGE finds its Ingress DMA State Engines in the
+ * same state they were on the previous clock tick. The clock used is
+ * the Core Clock so we have a limit on the maximum "time" they can
+ * record; typically a very small number of seconds. For instance,
+ * with a 600MHz Core Clock, we can only count up to a bit more than
+ * 7s. So we'll synthesize a larger counter in order to not run the
+ * risk of having the "timers" overflow and give us the flexibility to
+ * maintain a Hung SGE State Machine of our own which operates across
+ * a longer time frame.
+ */
+ idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
+ idma->idma_stalled[0] = 0;
+ idma->idma_stalled[1] = 0;
+}
+
+/**
+ * t4_idma_monitor - monitor SGE Ingress DMA state
+ * @adapter: the adapter
+ * @idma: the adapter IDMA Monitor state
+ * @hz: number of ticks/second
+ * @ticks: number of ticks since the last IDMA Monitor call
+ */
+void t4_idma_monitor(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma,
+ int hz, int ticks)
+{
+ int i, idma_same_state_cnt[2];
+
+ /* Read the SGE Debug Ingress DMA Same State Count registers. These
+ * are counters inside the SGE which count up on each clock when the
+ * SGE finds its Ingress DMA State Engines in the same states they
+ * were in the previous clock. The counters will peg out at
+ * 0xffffffff without wrapping around so once they pass the 1s
+ * threshold they'll stay above that till the IDMA state changes.
+ */
+ t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
+ idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
+ idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+
+ for (i = 0; i < 2; i++) {
+ u32 debug0, debug11;
+
+ /* If the Ingress DMA Same State Counter ("timer") is less
+ * than 1s, then we can reset our synthesized Stall Timer and
+ * continue. If we have previously emitted warnings about a
+ * potential stalled Ingress Queue, issue a note indicating
+ * that the Ingress Queue has resumed forward progress.
+ */
+ if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
+ if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
+ dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
+ "resumed after %d seconds\n",
+ i, idma->idma_qid[i],
+ idma->idma_stalled[i] / hz);
+ idma->idma_stalled[i] = 0;
+ continue;
+ }
+
+ /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+ * domain. The first time we get here it'll be because we
+ * passed the 1s Threshold; each additional time it'll be
+ * because the RX Timer Callback is being fired on its regular
+ * schedule.
+ *
+ * If the stall is below our Potential Hung Ingress Queue
+ * Warning Threshold, continue.
+ */
+ if (idma->idma_stalled[i] == 0) {
+ idma->idma_stalled[i] = hz;
+ idma->idma_warn[i] = 0;
+ } else {
+ idma->idma_stalled[i] += ticks;
+ idma->idma_warn[i] -= ticks;
+ }
+
+ if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
+ continue;
+
+ /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
+ */
+ if (idma->idma_warn[i] > 0)
+ continue;
+ idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
+
+ /* Read and save the SGE IDMA State and Queue ID information.
+ * We do this every time in case it changes across time ...
+ * can't be too careful ...
+ */
+ t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
+ debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+ idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+ t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
+ debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+ idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+ dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
+ "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
+ i, idma->idma_qid[i], idma->idma_state[i],
+ idma->idma_stalled[i] / hz,
+ debug0, debug11);
+ t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
+ }
+}
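
These two helpers are meant to be driven periodically; in cxgb4 the natural call site is the SGE RX timer callback. A hedged sketch of such a caller (the sge.idma_monitor and sge.rx_timer members and the RX_QCHECK_PERIOD interval are assumptions about the surrounding driver):

	/* assumed: struct sge carries a 'struct sge_idma_monitor_state
	 * idma_monitor' member, initialized once with t4_idma_monitor_init()
	 * at setup time
	 */
	static void sge_rx_timer_cb(unsigned long data)
	{
		struct adapter *adap = (struct adapter *)data;

		/* sample the IDMA same-state counters once per timer period */
		t4_idma_monitor(adap, &adap->sge.idma_monitor,
				HZ, RX_QCHECK_PERIOD);

		mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	}
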
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 380b15c0417a..f9a2cb164737 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -52,8 +52,6 @@ enum {
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
- NWOL_PAT = 8, /* # of WoL patterns */
- WOL_PAT_LEN = 128, /* length of WoL patterns */
};
enum {
@@ -152,17 +150,33 @@ struct rsp_ctrl {
};
};
-#define RSPD_NEWBUF 0x80000000U
-#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
-#define RSPD_QID(x) RSPD_LEN(x)
+#define RSPD_NEWBUF_S 31
+#define RSPD_NEWBUF_V(x) ((x) << RSPD_NEWBUF_S)
+#define RSPD_NEWBUF_F RSPD_NEWBUF_V(1U)
-#define RSPD_GEN(x) ((x) >> 7)
-#define RSPD_TYPE(x) (((x) >> 4) & 3)
+#define RSPD_LEN_S 0
+#define RSPD_LEN_M 0x7fffffff
+#define RSPD_LEN_G(x) (((x) >> RSPD_LEN_S) & RSPD_LEN_M)
-#define V_QINTR_CNT_EN 0x0
-#define QINTR_CNT_EN 0x1
-#define QINTR_TIMER_IDX(x) ((x) << 1)
-#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
+#define RSPD_QID_S RSPD_LEN_S
+#define RSPD_QID_M RSPD_LEN_M
+#define RSPD_QID_G(x) RSPD_LEN_G(x)
+
+#define RSPD_GEN_S 7
+
+#define RSPD_TYPE_S 4
+#define RSPD_TYPE_M 0x3
+#define RSPD_TYPE_G(x) (((x) >> RSPD_TYPE_S) & RSPD_TYPE_M)
+
+/* Rx queue interrupt deferral fields: counter enable and timer index */
+#define QINTR_CNT_EN_S 0
+#define QINTR_CNT_EN_V(x) ((x) << QINTR_CNT_EN_S)
+#define QINTR_CNT_EN_F QINTR_CNT_EN_V(1U)
+
+#define QINTR_TIMER_IDX_S 1
+#define QINTR_TIMER_IDX_M 0x7
+#define QINTR_TIMER_IDX_V(x) ((x) << QINTR_TIMER_IDX_S)
+#define QINTR_TIMER_IDX_G(x) (((x) >> QINTR_TIMER_IDX_S) & QINTR_TIMER_IDX_M)
/*
* Flash layout.
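
These definitions follow the convention used throughout the cxgb4 register headers: FOO_S is the bit shift, FOO_M the unshifted mask, FOO_V(x) places a value into the field, FOO_G(w) extracts it from a word, and FOO_F is the single-bit flag form. A standalone sketch of packing and unpacking with this pattern:

	#include <stdio.h>
	#include <stdint.h>

	#define TIMER_IDX_S	1
	#define TIMER_IDX_M	0x7
	#define TIMER_IDX_V(x)	((x) << TIMER_IDX_S)
	#define TIMER_IDX_G(x)	(((x) >> TIMER_IDX_S) & TIMER_IDX_M)

	#define CNT_EN_S	0
	#define CNT_EN_V(x)	((x) << CNT_EN_S)
	#define CNT_EN_F	CNT_EN_V(1U)

	int main(void)
	{
		/* pack: timer index 5 with the counter-enable flag set */
		uint32_t params = TIMER_IDX_V(5) | CNT_EN_F;

		/* unpack: prints "timer=5 cnt_en=1" */
		printf("timer=%u cnt_en=%u\n",
		       TIMER_IDX_G(params), params & CNT_EN_F);
		return 0;
	}
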
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 30a2f56e99c2..132cb8fc0bf7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -634,26 +634,9 @@ struct cpl_tid_release {
struct cpl_tx_pkt_core {
__be32 ctrl0;
-#define TXPKT_VF(x) ((x) << 0)
-#define TXPKT_PF(x) ((x) << 8)
-#define TXPKT_VF_VLD (1 << 11)
-#define TXPKT_OVLAN_IDX(x) ((x) << 12)
-#define TXPKT_INTF(x) ((x) << 16)
-#define TXPKT_INS_OVLAN (1 << 21)
-#define TXPKT_OPCODE(x) ((x) << 24)
__be16 pack;
__be16 len;
__be64 ctrl1;
-#define TXPKT_CSUM_END(x) ((x) << 12)
-#define TXPKT_CSUM_START(x) ((x) << 20)
-#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
-#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
-#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
-#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
-#define TXPKT_VLAN(x) ((u64)(x) << 44)
-#define TXPKT_VLAN_VLD (1ULL << 60)
-#define TXPKT_IPCSUM_DIS (1ULL << 62)
-#define TXPKT_L4CSUM_DIS (1ULL << 63)
};
struct cpl_tx_pkt {
@@ -663,16 +646,69 @@ struct cpl_tx_pkt {
#define cpl_tx_pkt_xt cpl_tx_pkt
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define TXPKT_VF_S 0
+#define TXPKT_VF_V(x) ((x) << TXPKT_VF_S)
+
+#define TXPKT_PF_S 8
+#define TXPKT_PF_V(x) ((x) << TXPKT_PF_S)
+
+#define TXPKT_VF_VLD_S 11
+#define TXPKT_VF_VLD_V(x) ((x) << TXPKT_VF_VLD_S)
+#define TXPKT_VF_VLD_F TXPKT_VF_VLD_V(1U)
+
+#define TXPKT_OVLAN_IDX_S 12
+#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+
+#define TXPKT_INTF_S 16
+#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
+
+#define TXPKT_INS_OVLAN_S 21
+#define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S)
+#define TXPKT_INS_OVLAN_F TXPKT_INS_OVLAN_V(1U)
+
+#define TXPKT_OPCODE_S 24
+#define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define TXPKT_CSUM_END_S 12
+#define TXPKT_CSUM_END_V(x) ((x) << TXPKT_CSUM_END_S)
+
+#define TXPKT_CSUM_START_S 20
+#define TXPKT_CSUM_START_V(x) ((x) << TXPKT_CSUM_START_S)
+
+#define TXPKT_IPHDR_LEN_S 20
+#define TXPKT_IPHDR_LEN_V(x) ((__u64)(x) << TXPKT_IPHDR_LEN_S)
+
+#define TXPKT_CSUM_LOC_S 30
+#define TXPKT_CSUM_LOC_V(x) ((__u64)(x) << TXPKT_CSUM_LOC_S)
+
+#define TXPKT_ETHHDR_LEN_S 34
+#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
+
+#define T6_TXPKT_ETHHDR_LEN_S 32
+#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S)
+
+#define TXPKT_CSUM_TYPE_S 40
+#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
+
+#define TXPKT_VLAN_S 44
+#define TXPKT_VLAN_V(x) ((__u64)(x) << TXPKT_VLAN_S)
+
+#define TXPKT_VLAN_VLD_S 60
+#define TXPKT_VLAN_VLD_V(x) ((__u64)(x) << TXPKT_VLAN_VLD_S)
+#define TXPKT_VLAN_VLD_F TXPKT_VLAN_VLD_V(1ULL)
+
+#define TXPKT_IPCSUM_DIS_S 62
+#define TXPKT_IPCSUM_DIS_V(x) ((__u64)(x) << TXPKT_IPCSUM_DIS_S)
+#define TXPKT_IPCSUM_DIS_F TXPKT_IPCSUM_DIS_V(1ULL)
+
+#define TXPKT_L4CSUM_DIS_S 63
+#define TXPKT_L4CSUM_DIS_V(x) ((__u64)(x) << TXPKT_L4CSUM_DIS_S)
+#define TXPKT_L4CSUM_DIS_F TXPKT_L4CSUM_DIS_V(1ULL)
+
struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
-#define LSO_TCPHDR_LEN(x) ((x) << 0)
-#define LSO_IPHDR_LEN(x) ((x) << 4)
-#define LSO_ETHHDR_LEN(x) ((x) << 16)
-#define LSO_IPV6(x) ((x) << 20)
-#define LSO_LAST_SLICE (1 << 22)
-#define LSO_FIRST_SLICE (1 << 23)
-#define LSO_OPCODE(x) ((x) << 24)
-#define LSO_T5_XFER_SIZE(x) ((x) << 0)
__be16 ipid_ofst;
__be16 mss;
__be32 seqno_offset;
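
Hoisting the TXPKT_* field macros out of the struct body doesn't change how a TX Packet CPL is built, only the spellings (_V/_F) and the explicit __u64 casts that keep the high-bit shifts in 64-bit arithmetic. A hedged sketch of filling the two control words for TCP/IPv4 checksum offload, mirroring the sge.c hunks later in this patch:

	#include "t4_msg.h"	/* TXPKT_* macros, CPL_TX_PKT_XT, TX_CSUM_TCPIP */

	static void fill_tx_pkt_cpl(struct cpl_tx_pkt_core *cpl, u8 port_id,
				    u16 pkt_len, u32 l3hdr_len, u32 eth_xtra_len)
	{
		u64 cntrl;

		cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
					 TXPKT_INTF_V(port_id) |
					 TXPKT_PF_V(0));
		cpl->pack = cpu_to_be16(0);
		cpl->len = cpu_to_be16(pkt_len);

		/* the csum-type field starts at bit 40, so TXPKT_CSUM_TYPE_V()
		 * must widen its argument to 64 bits before shifting
		 */
		cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) |
			TXPKT_IPHDR_LEN_V(l3hdr_len) |
			TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		cpl->ctrl1 = cpu_to_be64(cntrl);
	}
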
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 326674b19983..af3462db5adb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -418,6 +418,20 @@
#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
+#define SGE_ERROR_STATS_A 0x1100
+
+#define UNCAPTURED_ERROR_S 18
+#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S)
+#define UNCAPTURED_ERROR_F UNCAPTURED_ERROR_V(1U)
+
+#define ERROR_QID_VALID_S 17
+#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S)
+#define ERROR_QID_VALID_F ERROR_QID_VALID_V(1U)
+
+#define ERROR_QID_S 0
+#define ERROR_QID_M 0x1ffffU
+#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
+
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
@@ -448,8 +462,13 @@
#define SGE_STAT_MATCH_A 0x10e8
#define SGE_STAT_CFG_A 0x10ec
+#define STATMODE_S 2
+#define STATMODE_V(x) ((x) << STATMODE_S)
+
#define STATSOURCE_T5_S 9
+#define STATSOURCE_T5_M 0xfU
#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M)
#define SGE_DBFIFO_STATUS2_A 0x1118
@@ -705,6 +724,10 @@
#define REGISTER_S 0
#define REGISTER_V(x) ((x) << REGISTER_S)
+#define T6_ENABLE_S 31
+#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S)
+#define T6_ENABLE_F T6_ENABLE_V(1U)
+
#define PFNUM_S 0
#define PFNUM_V(x) ((x) << PFNUM_S)
@@ -1399,6 +1422,8 @@
#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
#define TP_MIB_MAC_IN_ERR_0_A 0x0
+#define TP_MIB_HDR_IN_ERR_0_A 0x4
+#define TP_MIB_TCP_IN_ERR_0_A 0x8
#define TP_MIB_TCP_OUT_RST_A 0xc
#define TP_MIB_TCP_IN_SEG_HI_A 0x10
#define TP_MIB_TCP_IN_SEG_LO_A 0x11
@@ -1407,11 +1432,19 @@
#define TP_MIB_TCP_RXT_SEG_HI_A 0x14
#define TP_MIB_TCP_RXT_SEG_LO_A 0x15
#define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_OFD_CHN_DROP_0_A 0x1c
#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
#define TP_MIB_TCP_V6OUT_RST_A 0x2c
#define TP_MIB_OFD_ARP_DROP_A 0x36
+#define TP_MIB_CPL_IN_REQ_0_A 0x38
+#define TP_MIB_CPL_OUT_RSP_0_A 0x3c
#define TP_MIB_TNL_DROP_0_A 0x44
+#define TP_MIB_FCOE_DDP_0_A 0x48
+#define TP_MIB_FCOE_DROP_0_A 0x4c
+#define TP_MIB_FCOE_BYTE_0_HI_A 0x50
#define TP_MIB_OFD_VLN_DROP_0_A 0x58
+#define TP_MIB_USM_PKTS_A 0x5c
+#define TP_MIB_RQE_DFR_PKT_A 0x64
#define ULP_TX_INT_CAUSE_A 0x8dcc
@@ -1572,6 +1605,7 @@
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
@@ -2054,6 +2088,11 @@
#define VFLKPIDX_M 0xffU
#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
+#define T6_VFWRADDR_S 8
+#define T6_VFWRADDR_M 0xffU
+#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S)
+#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M)
+
#define TP_RSS_CONFIG_CNG_A 0x7e04
#define TP_RSS_SECRET_KEY0_A 0x40
#define TP_RSS_PF0_CONFIG_A 0x30
@@ -2175,7 +2214,28 @@
#define MPS_RX_PERR_INT_CAUSE_A 0x11074
#define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_DATA0_A 0xf000
+#define MPS_CLS_TCAM_DATA1_A 0xf004
+
+#define DMACH_S 0
+#define DMACH_M 0xffffU
+#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
+
#define MPS_CLS_TCAM_X_L_A 0xf008
+#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008
+
+#define CTLCMDTYPE_S 31
+#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S)
+#define CTLCMDTYPE_F CTLCMDTYPE_V(1U)
+
+#define CTLTCAMSEL_S 25
+#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S)
+
+#define CTLTCAMINDEX_S 17
+#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S)
+
+#define CTLXYBITSEL_S 16
+#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S)
#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
@@ -2184,6 +2244,45 @@
#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
#define MPS_CLS_SRAM_L_A 0xe000
+
+#define T6_MULTILISTEN0_S 26
+
+#define T6_SRAM_PRIO3_S 23
+#define T6_SRAM_PRIO3_M 0x7U
+#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M)
+
+#define T6_SRAM_PRIO2_S 20
+#define T6_SRAM_PRIO2_M 0x7U
+#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M)
+
+#define T6_SRAM_PRIO1_S 17
+#define T6_SRAM_PRIO1_M 0x7U
+#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M)
+
+#define T6_SRAM_PRIO0_S 14
+#define T6_SRAM_PRIO0_M 0x7U
+#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M)
+
+#define T6_SRAM_VLD_S 13
+#define T6_SRAM_VLD_V(x) ((x) << T6_SRAM_VLD_S)
+#define T6_SRAM_VLD_F T6_SRAM_VLD_V(1U)
+
+#define T6_REPLICATE_S 12
+#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S)
+#define T6_REPLICATE_F T6_REPLICATE_V(1U)
+
+#define T6_PF_S 9
+#define T6_PF_M 0x7U
+#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M)
+
+#define T6_VF_VALID_S 8
+#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S)
+#define T6_VF_VALID_F T6_VF_VALID_V(1U)
+
+#define T6_VF_S 0
+#define T6_VF_M 0xffU
+#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M)
+
#define MPS_CLS_SRAM_H_A 0xe004
#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
@@ -2433,6 +2532,8 @@
#define CIM_F CIM_V(1U)
#define MC1_S 31
+#define MC1_V(x) ((x) << MC1_S)
+#define MC1_F MC1_V(1U)
#define PL_INT_ENABLE_A 0x19410
#define PL_INT_MAP0_A 0x19414
@@ -2463,6 +2564,18 @@
#define REV_V(x) ((x) << REV_S)
#define REV_G(x) (((x) >> REV_S) & REV_M)
+#define T6_UNKNOWNCMD_S 3
+#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
+#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U)
+
+#define T6_LIP0_S 2
+#define T6_LIP0_V(x) ((x) << T6_LIP0_S)
+#define T6_LIP0_F T6_LIP0_V(1U)
+
+#define T6_LIPMISS_S 1
+#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
+#define T6_LIPMISS_F T6_LIPMISS_V(1U)
+
#define LE_DB_INT_CAUSE_A 0x19c3c
#define REQQPARERR_S 16
@@ -2485,6 +2598,14 @@
#define LIP0_V(x) ((x) << LIP0_S)
#define LIP0_F LIP0_V(1U)
+#define TCAMINTPERR_S 13
+#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
+#define TCAMINTPERR_F TCAMINTPERR_V(1U)
+
+#define SSRAMINTPERR_S 10
+#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
+#define SSRAMINTPERR_F SSRAMINTPERR_V(1U)
+
#define NCSI_INT_CAUSE_A 0x1a0d8
#define CIM_DM_PRTY_ERR_S 8
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index 19b2dcf6acde..7bdee3bf75ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -61,6 +61,30 @@
#define SGE_TIMERREGS 6
#define TIMERREG_COUNTER0_X 0
+#define FETCHBURSTMIN_64B_X 2
+
+#define FETCHBURSTMAX_256B_X 2
+#define FETCHBURSTMAX_512B_X 3
+
+#define HOSTFCMODE_STATUS_PAGE_X 2
+
+#define CIDXFLUSHTHRESH_32_X 5
+
+#define UPDATEDELIVERY_INTERRUPT_X 1
+
+#define RSPD_TYPE_FLBUF_X 0
+#define RSPD_TYPE_CPL_X 1
+#define RSPD_TYPE_INTR_X 2
+
+/* Congestion Manager Definitions.
+ */
+#define CONMCTXT_CNGTPMODE_S 19
+#define CONMCTXT_CNGTPMODE_V(x) ((x) << CONMCTXT_CNGTPMODE_S)
+#define CONMCTXT_CNGCHMAP_S 0
+#define CONMCTXT_CNGCHMAP_V(x) ((x) << CONMCTXT_CNGCHMAP_S)
+#define CONMCTXT_CNGTPMODE_CHANNEL_X 2
+#define CONMCTXT_CNGTPMODE_QUEUE_X 1
+
/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
* The User Doorbells are each 128 bytes in length with a Simple Doorbell at
* offsets 8x and a Write Combining single 64-byte Egress Queue Unit
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 03fbfd1fb3df..ab4674684acc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -772,7 +772,7 @@ struct fw_ldst_cmd {
} addrval;
struct fw_ldst_idctxt {
__be32 physid;
- __be32 msg_pkd;
+ __be32 msg_ctxtflush;
__be32 ctxt_data7;
__be32 ctxt_data6;
__be32 ctxt_data5;
@@ -788,15 +788,27 @@ struct fw_ldst_cmd {
__be16 vctl;
__be16 rval;
} mdio;
- struct fw_ldst_mps {
- __be16 fid_ctl;
- __be16 rplcpf_pkd;
- __be32 rplc127_96;
- __be32 rplc95_64;
- __be32 rplc63_32;
- __be32 rplc31_0;
- __be32 atrb;
- __be16 vlan[16];
+ union fw_ldst_mps {
+ struct fw_ldst_mps_rplc {
+ __be16 fid_idx;
+ __be16 rplcpf_pkd;
+ __be32 rplc255_224;
+ __be32 rplc223_192;
+ __be32 rplc191_160;
+ __be32 rplc159_128;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ } rplc;
+ struct fw_ldst_mps_atrb {
+ __be16 fid_mpsid;
+ __be16 r2[3];
+ __be32 r3[2];
+ __be32 r4;
+ __be32 atrb;
+ __be16 vlan[16];
+ } atrb;
} mps;
struct fw_ldst_func {
u8 access_ctl;
@@ -822,6 +834,10 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_MSG_S 31
#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)
+#define FW_LDST_CMD_CTXTFLUSH_S 30
+#define FW_LDST_CMD_CTXTFLUSH_V(x) ((x) << FW_LDST_CMD_CTXTFLUSH_S)
+#define FW_LDST_CMD_CTXTFLUSH_F FW_LDST_CMD_CTXTFLUSH_V(1U)
+
#define FW_LDST_CMD_PADDR_S 8
#define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S)
@@ -831,8 +847,8 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_FID_S 15
#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S)
-#define FW_LDST_CMD_CTL_S 0
-#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S)
+#define FW_LDST_CMD_IDX_S 0
+#define FW_LDST_CMD_IDX_V(x) ((x) << FW_LDST_CMD_IDX_S)
#define FW_LDST_CMD_RPLCPF_S 0
#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S)
@@ -1061,6 +1077,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
FW_PARAMS_PARAM_DEV_CF = 0x0D,
+ FW_PARAMS_PARAM_DEV_PHYFW = 0x0F,
FW_PARAMS_PARAM_DEV_DIAG = 0x11,
FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
@@ -1123,6 +1140,12 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
+ FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+};
+
+enum fw_params_param_dev_phyfw {
+ FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00,
+ FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01,
};
enum fw_params_param_dev_diag {
@@ -1377,6 +1400,7 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_IQFLINTCONGEN_S 27
#define FW_IQ_CMD_IQFLINTCONGEN_V(x) ((x) << FW_IQ_CMD_IQFLINTCONGEN_S)
+#define FW_IQ_CMD_IQFLINTCONGEN_F FW_IQ_CMD_IQFLINTCONGEN_V(1U)
#define FW_IQ_CMD_IQFLINTISCSIC_S 26
#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
@@ -1399,6 +1423,7 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_FL0CONGCIF_S 11
#define FW_IQ_CMD_FL0CONGCIF_V(x) ((x) << FW_IQ_CMD_FL0CONGCIF_S)
+#define FW_IQ_CMD_FL0CONGCIF_F FW_IQ_CMD_FL0CONGCIF_V(1U)
#define FW_IQ_CMD_FL0ONCHIP_S 10
#define FW_IQ_CMD_FL0ONCHIP_V(x) ((x) << FW_IQ_CMD_FL0ONCHIP_S)
@@ -1589,6 +1614,7 @@ struct fw_eq_eth_cmd {
#define FW_EQ_ETH_CMD_FETCHRO_S 22
#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S)
+#define FW_EQ_ETH_CMD_FETCHRO_F FW_EQ_ETH_CMD_FETCHRO_V(1U)
#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20
#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S)
@@ -2526,13 +2552,8 @@ enum fw_port_mod_sub_type {
FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
};
-/* port stats */
-#define FW_NUM_PORT_STATS 50
-#define FW_NUM_PORT_TX_STATS 23
-#define FW_NUM_PORT_RX_STATS 27
-
enum fw_port_stats_tx_index {
- FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_BYTES_IX = 0,
FW_STAT_TX_PORT_FRAMES_IX,
FW_STAT_TX_PORT_BCAST_IX,
FW_STAT_TX_PORT_MCAST_IX,
@@ -2554,11 +2575,12 @@ enum fw_port_stats_tx_index {
FW_STAT_TX_PORT_PPP4_IX,
FW_STAT_TX_PORT_PPP5_IX,
FW_STAT_TX_PORT_PPP6_IX,
- FW_STAT_TX_PORT_PPP7_IX
+ FW_STAT_TX_PORT_PPP7_IX,
+ FW_NUM_PORT_TX_STATS
};
enum fw_port_stat_rx_index {
- FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_BYTES_IX = 0,
FW_STAT_RX_PORT_FRAMES_IX,
FW_STAT_RX_PORT_BCAST_IX,
FW_STAT_RX_PORT_MCAST_IX,
@@ -2584,9 +2606,14 @@ enum fw_port_stat_rx_index {
FW_STAT_RX_PORT_PPP5_IX,
FW_STAT_RX_PORT_PPP6_IX,
FW_STAT_RX_PORT_PPP7_IX,
- FW_STAT_RX_PORT_LESS_64B_IX
+ FW_STAT_RX_PORT_LESS_64B_IX,
+ FW_STAT_RX_PORT_MAC_ERROR_IX,
+ FW_NUM_PORT_RX_STATS
};
+/* port stats */
+#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS)
+
struct fw_port_stats_cmd {
__be32 op_to_portid;
__be32 retval_len16;
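
Moving FW_NUM_PORT_TX_STATS and FW_NUM_PORT_RX_STATS into the enums as final entries, and deriving FW_NUM_PORT_STATS from them, makes the compiler maintain the counts, so they can no longer drift when an index such as FW_STAT_RX_PORT_MAC_ERROR_IX is appended. A standalone sketch of the sentinel pattern:

	#include <stdio.h>

	enum tx_stat_index {
		STAT_TX_BYTES = 0,
		STAT_TX_FRAMES,
		STAT_TX_DROPS,
		NUM_TX_STATS		/* sentinel: always the entry count */
	};

	int main(void)
	{
		/* appending a new index above the sentinel bumps the count
		 * automatically; prints "tx stats: 3"
		 */
		printf("tx stats: %d\n", NUM_TX_STATS);
		return 0;
	}
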
@@ -3015,7 +3042,8 @@ struct fw_hdr {
enum fw_hdr_chip {
FW_HDR_CHIP_T4,
- FW_HDR_CHIP_T5
+ FW_HDR_CHIP_T5,
+ FW_HDR_CHIP_T6
};
#define FW_HDR_FW_VER_MAJOR_S 24
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index b9d1cbac0eee..32b213559b02 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -45,4 +45,9 @@
#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_BUILD 0x00
+#define T6FW_VERSION_MAJOR 0x01
+#define T6FW_VERSION_MINOR 0x0D
+#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_BUILD 0x00
+
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1d893b0b7ddf..b2b5e5bbe04c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1021,7 +1021,7 @@ static int closest_thres(const struct sge *s, int thres)
static unsigned int qtimer_val(const struct adapter *adapter,
const struct sge_rspq *rspq)
{
- unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
+ unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
return timer_idx < SGE_NTIMERS
? adapter->sge.timer_val[timer_idx]
@@ -1086,8 +1086,8 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
* Update the response queue's interrupt coalescing parameters and
* return success.
*/
- rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
- (cnt > 0 ? QINTR_CNT_EN : 0));
+ rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+ QINTR_CNT_EN_V(cnt > 0));
return 0;
}
@@ -1439,7 +1439,7 @@ static int cxgb4vf_get_coalesce(struct net_device *dev,
coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
coalesce->rx_max_coalesced_frames =
- ((rspq->intr_params & QINTR_CNT_EN)
+ ((rspq->intr_params & QINTR_CNT_EN_F)
? adapter->sge.counter_val[rspq->pktcnt_idx]
: 0);
return 0;
@@ -2393,8 +2393,9 @@ static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
u8 pkt_cnt_idx, unsigned int size,
unsigned int iqe_size)
{
- rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
- (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
+ rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+ (pkt_cnt_idx < SGE_NCOUNTERS ?
+ QINTR_CNT_EN_F : 0));
rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
? pkt_cnt_idx
: 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 482f6de6817d..ad53e5ad2acd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -524,7 +524,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
*/
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
- u32 val;
+ u32 val = adapter->params.arch.sge_fl_db;
/* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
@@ -532,11 +532,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
if (is_t4(adapter->params.chip))
- val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
+ val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
else
- val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
- DBTYPE_F;
- val |= DBPRIO_F;
+ val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
/* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them.
@@ -1084,7 +1082,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
@@ -1100,7 +1098,7 @@ nocsum:
* unknown protocol, disable HW csum
* and hope a bad packet is detected
*/
- return TXPKT_L4CSUM_DIS;
+ return TXPKT_L4CSUM_DIS_F;
}
} else {
/*
@@ -1116,16 +1114,21 @@ nocsum:
goto nocsum;
}
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
- else {
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+ int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+ if (chip <= CHELSIO_T5)
+ hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ else
+ hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+ } else {
int start = skb_transport_offset(skb);
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_CSUM_START_V(start) |
+ TXPKT_CSUM_LOC_V(start + skb->csum_offset);
}
}
@@ -1160,7 +1163,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid;
u64 cntrl, *end;
- int qidx, credits;
+ int qidx, credits, max_pkt_len;
unsigned int flits, ndesc;
struct adapter *adapter;
struct sge_eth_txq *txq;
@@ -1183,6 +1186,13 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len < fw_hdr_copy_len))
goto out_free;
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
/*
* Figure out which TX Queue we're going to use.
*/
@@ -1281,29 +1291,35 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* Fill in the LSO CPL message.
*/
lso->lso_ctrl =
- cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE |
- LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len/4) |
- LSO_IPHDR_LEN(l3hdr_len/4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F |
+ LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->ipid_ofst = cpu_to_be16(0);
lso->mss = cpu_to_be16(ssi->gso_size);
lso->seqno_offset = cpu_to_be32(0);
if (is_t4(adapter->params.chip))
lso->len = cpu_to_be32(skb->len);
else
- lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));
+ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
/*
* Set up TX Packet CPL pointer, control word and perform
* accounting.
*/
cpl = (void *)(lso + 1);
- cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len));
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
txq->tso++;
txq->tx_cso += ssi->gso_segs;
} else {
@@ -1320,10 +1336,11 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ cntrl = hwcsum(adapter->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
txq->tx_cso++;
} else
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
}
/*
@@ -1332,15 +1349,15 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (skb_vlan_tag_present(skb)) {
txq->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
}
/*
* Fill in the TX Packet CPL message header.
*/
- cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->port_id) |
- TXPKT_PF(0));
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->port_id) |
+ TXPKT_PF_V(0));
cpl->pack = cpu_to_be16(0);
cpl->len = cpu_to_be16(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1663,7 +1680,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
static inline bool is_new_response(const struct rsp_ctrl *rc,
const struct sge_rspq *rspq)
{
- return RSPD_GEN(rc->type_gen) == rspq->gen;
+ return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
}
/**
@@ -1752,8 +1769,8 @@ static int process_responses(struct sge_rspq *rspq, int budget)
* SGE.
*/
dma_rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ rsp_type = RSPD_TYPE_G(rc->type_gen);
+ if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
struct page_frag *fp;
struct pkt_gl gl;
const struct rx_sw_desc *sdesc;
@@ -1764,7 +1781,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
* If we get a "new buffer" message from the SGE we
* need to move on to the next Free List buffer.
*/
- if (len & RSPD_NEWBUF) {
+ if (len & RSPD_NEWBUF_F) {
/*
* We get one "new buffer" message when we
* first start up a queue so we need to ignore
@@ -1775,7 +1792,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1);
rspq->offset = 0;
}
- len = RSPD_LEN(len);
+ len = RSPD_LEN_G(len);
}
gl.tot_len = len;
@@ -1818,10 +1835,10 @@ static int process_responses(struct sge_rspq *rspq, int budget)
rspq->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&gl, &rxq->fl, frag);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
ret = rspq->handler(rspq, rspq->cur_desc, NULL);
} else {
- WARN_ON(rsp_type > RSP_TYPE_CPL);
+ WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
ret = 0;
}
@@ -1833,7 +1850,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
*/
const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
rspq->next_intr_params =
- QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+ QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
break;
}
@@ -1875,7 +1892,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
intr_params = rspq->next_intr_params;
rspq->next_intr_params = rspq->intr_params;
} else
- intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+ intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
if (unlikely(work_done == 0))
rspq->unhandled_irqs++;
@@ -1936,10 +1953,10 @@ static unsigned int process_intrq(struct adapter *adapter)
* never happen ...
*/
dma_rmb();
- if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+ if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
dev_err(adapter->pdev_dev,
"Unexpected INTRQ response type %d\n",
- RSPD_TYPE(rc->type_gen));
+ RSPD_TYPE_G(rc->type_gen));
continue;
}
@@ -1951,7 +1968,7 @@ static unsigned int process_intrq(struct adapter *adapter)
* want to either make them fatal and/or conditionalized under
* DEBUG.
*/
- qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+ qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
iq_idx = IQ_IDX(s, qid);
if (unlikely(iq_idx >= MAX_INGQ)) {
dev_err(adapter->pdev_dev,
@@ -2154,8 +2171,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
u64 bar2_qoffset;
int ret;
- ret = t4_bar2_sge_qregs(adapter, qid, qtype,
- &bar2_qoffset, pbar2_qid);
+ ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
+ &bar2_qoffset, pbar2_qid);
if (ret)
return NULL;
@@ -2239,12 +2256,18 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
if (fl) {
+ enum chip_type chip =
+ CHELSIO_CHIP_VERSION(adapter->params.chip);
/*
* Allocate the ring for the hardware free list (with space
* for its status page) along with the associated software
* descriptor ring. The free list size needs to be a multiple
- * of the Egress Queue Unit.
+ * of the Egress Queue Unit and at least 2 Egress Units larger
+ * than the SGE's Egress Congestion Threshold
+ * (fl_starve_thres - 1).
*/
+ if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
+ fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
sizeof(__be64), sizeof(struct rx_sw_desc),
@@ -2274,7 +2297,9 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
- FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
+ FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
cmd.fl0size = cpu_to_be16(flsz);
cmd.fl0addr = cpu_to_be64(fl->addr);
}
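
The new clamp keeps the free list at least two Egress Queue Units above the SGE's egress congestion threshold, so refilling buffers can never stall behind the starvation watermark. A toy computation with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int fl_starve_thres = 68;	/* made-up threshold */
		unsigned int fl_per_eq_unit = 8;	/* descriptors per EQ unit */
		unsigned int size = 48;			/* requested size, too small */
		unsigned int min_size = fl_starve_thres - 1 + 2 * fl_per_eq_unit;

		if (size < min_size)
			size = min_size;		/* 83 */
		/* round up to a whole number of Egress Queue Units: 88 */
		size = (size + fl_per_eq_unit - 1) / fl_per_eq_unit * fl_per_eq_unit;
		printf("fl size = %u\n", size);
		return 0;
	}
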
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index b9debb4f29a3..88b8981b4751 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -51,6 +51,7 @@
*/
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -156,6 +157,12 @@ struct vpd_params {
u32 cclk; /* Core Clock (KHz) */
};
+/* Stores chip specific parameters */
+struct arch_specific_params {
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
/*
* Global Receive Side Scaling (RSS) parameters in host-native format.
*/
@@ -215,6 +222,7 @@ struct adapter_params {
struct vpd_params vpd; /* Vital Product Data */
struct rss_params rss; /* Receive Side Scaling */
struct vf_resources vfres; /* Virtual Function Resource limits */
+ struct arch_specific_params arch; /* chip specific params */
enum chip_type chip; /* chip code */
u8 nports; /* # of Ethernet "ports" */
};
@@ -284,11 +292,11 @@ int t4vf_fw_reset(struct adapter *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int t4_bar2_sge_qregs(struct adapter *adapter,
- unsigned int qid,
- enum t4_bar2_qtype qtype,
- u64 *pbar2_qoffset,
- unsigned int *pbar2_qid);
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 966ee900ed00..0db6dc9e9ed2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -428,7 +428,7 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
}
/**
- * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
* @qtype: the Ingress or Egress type for @qid
@@ -452,11 +452,11 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
* Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
* then these "Inferred Queue ID" registers may not be used.
*/
-int t4_bar2_sge_qregs(struct adapter *adapter,
- unsigned int qid,
- enum t4_bar2_qtype qtype,
- u64 *pbar2_qoffset,
- unsigned int *pbar2_qid)
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
{
unsigned int page_shift, page_size, qpp_shift, qpp_mask;
u64 bar2_page_offset, bar2_qoffset;
@@ -1191,9 +1191,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- unsigned int max_naddr = is_t4(adapter->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
if (naddr > max_naddr)
return -EINVAL;
@@ -1285,9 +1283,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
- unsigned int max_naddr = is_t4(adapter->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
/*
* If this is a new allocation, determine whether it should be
@@ -1310,7 +1306,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
if (ret == 0) {
p = &rpl.u.exact[0];
ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
- if (ret >= max_naddr)
+ if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
@@ -1590,11 +1586,25 @@ int t4vf_prep_adapter(struct adapter *adapter)
switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ adapter->params.arch.sge_fl_db = DBPRIO_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_CLS_SRAM_L_INSTANCES;
break;
case CHELSIO_T5:
chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
+ adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+
+ case CHELSIO_T6:
+ chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
break;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 0be6850be8a2..d106186f4f4a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -5,7 +5,7 @@
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
#include "enic_res.h"
#include "enic_clsf.h"
@@ -15,14 +15,14 @@
* @rq: rq number to steer to
*
* This function returns filter_id(hardware_id) of the filter
- * added. In case of error it returns an negative number.
+ * added. In case of error it returns a negative number.
*/
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
int res;
struct filter data;
- switch (keys->ip_proto) {
+ switch (keys->basic.ip_proto) {
case IPPROTO_TCP:
data.u.ipv4.protocol = PROTO_TCP;
break;
@@ -33,10 +33,10 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
return -EPROTONOSUPPORT;
};
data.type = FILTER_IPV4_5TUPLE;
- data.u.ipv4.src_addr = ntohl(keys->src);
- data.u.ipv4.dst_addr = ntohl(keys->dst);
- data.u.ipv4.src_port = ntohs(keys->port16[0]);
- data.u.ipv4.dst_port = ntohs(keys->port16[1]);
+ data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
+ data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
+ data.u.ipv4.src_port = ntohs(keys->ports.src);
+ data.u.ipv4.dst_port = ntohs(keys->ports.dst);
data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
spin_lock_bh(&enic->devcmd_lock);
@@ -158,11 +158,11 @@ static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
struct enic_rfs_fltr_node *tpos;
hlist_for_each_entry(tpos, h, node)
- if (tpos->keys.src == k->src &&
- tpos->keys.dst == k->dst &&
- tpos->keys.ports == k->ports &&
- tpos->keys.ip_proto == k->ip_proto &&
- tpos->keys.n_proto == k->n_proto)
+ if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
+ tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
+ tpos->keys.ports.ports == k->ports.ports &&
+ tpos->keys.basic.ip_proto == k->basic.ip_proto &&
+ tpos->keys.basic.n_proto == k->basic.n_proto)
return tpos;
return NULL;
}
@@ -177,9 +177,10 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
int res, i;
enic = netdev_priv(dev);
- res = skb_flow_dissect(skb, &keys);
- if (!res || keys.n_proto != htons(ETH_P_IP) ||
- (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
+ res = skb_flow_dissect_flow_keys(skb, &keys);
+ if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
+ (keys.basic.ip_proto != IPPROTO_TCP &&
+ keys.basic.ip_proto != IPPROTO_UDP))
return -EPROTONOSUPPORT;
tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
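
The conversions above track the flow-dissector rework: skb_flow_dissect() becomes skb_flow_dissect_flow_keys(), and the flat src/dst/port16[]/ip_proto members move under the addrs.v4addrs, ports, and basic sub-structs. A hedged sketch of the new access pattern:

	#include <linux/in.h>
	#include <linux/if_ether.h>
	#include <linux/skbuff.h>
	#include <net/flow_dissector.h>

	/* true when skb is an IPv4 TCP or UDP packet */
	static bool example_is_ipv4_l4(const struct sk_buff *skb)
	{
		struct flow_keys keys;

		if (!skb_flow_dissect_flow_keys(skb, &keys))
			return false;

		return keys.basic.n_proto == htons(ETH_P_IP) &&
		       (keys.basic.ip_proto == IPPROTO_TCP ||
			keys.basic.ip_proto == IPPROTO_UDP);
	}
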
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 28d9ca675a27..73874b2575bf 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -334,7 +334,7 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
n = htbl_fltr_search(enic, (u16)fsp->location);
if (!n)
return -EINVAL;
- switch (n->keys.ip_proto) {
+ switch (n->keys.basic.ip_proto) {
case IPPROTO_TCP:
fsp->flow_type = TCP_V4_FLOW;
break;
@@ -346,16 +346,16 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
break;
}
- fsp->h_u.tcp_ip4_spec.ip4src = n->keys.src;
+ fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
- fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.dst;
+ fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
- fsp->h_u.tcp_ip4_spec.psrc = n->keys.port16[0];
+ fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
- fsp->h_u.tcp_ip4_spec.pdst = n->keys.port16[1];
+ fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
fsp->ring_cookie = n->rq_id;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index badff181e719..8966f3159bb2 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -5189,16 +5189,16 @@ de4x5_parse_params(struct net_device *dev)
if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
- if (strstr(p, "TP")) {
- lp->params.autosense = TP;
- } else if (strstr(p, "TP_NW")) {
+ if (strstr(p, "TP_NW")) {
lp->params.autosense = TP_NW;
+ } else if (strstr(p, "TP")) {
+ lp->params.autosense = TP;
+ } else if (strstr(p, "BNC_AUI")) {
+ lp->params.autosense = BNC;
} else if (strstr(p, "BNC")) {
lp->params.autosense = BNC;
} else if (strstr(p, "AUI")) {
lp->params.autosense = AUI;
- } else if (strstr(p, "BNC_AUI")) {
- lp->params.autosense = BNC;
} else if (strstr(p, "10Mb")) {
lp->params.autosense = _10Mb;
} else if (strstr(p, "100Mb")) {
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 2c30c0c83f98..447d09272ab7 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1115,7 +1115,7 @@ static void uli526x_timer(unsigned long data)
netif_carrier_off(dev);
}
}
- db->init=0;
+ db->init = 0;
/* Timer active again */
db->timer.expires = ULI526X_TIMER_WUT;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1274b6fdac8a..cf0a5fcdaaaf 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -463,10 +463,8 @@ rio_open (struct net_device *dev)
dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
}
- init_timer (&np->timer);
+ setup_timer(&np->timer, rio_timer, (unsigned long)dev);
np->timer.expires = jiffies + 1*HZ;
- np->timer.data = (unsigned long) dev;
- np->timer.function = rio_timer;
add_timer (&np->timer);
/* Start Tx/Rx */
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index ea94a8eb6b35..7108563260ae 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -5,6 +5,15 @@ config BE2NET
This driver implements the NIC functionality for ServerEngines'
10Gbps network adapter - BladeEngine.
+config BE2NET_HWMON
+ bool "HWMON support for be2net driver"
+ depends on BE2NET && HWMON
+ depends on !(BE2NET=y && HWMON=m)
+ default y
+ ---help---
+ Say Y here if you want to expose thermal sensor data on the
+ be2net network adapter.
+
config BE2NET_VXLAN
bool "VXLAN offload support on be2net driver"
default y
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 1bf1cdce74ac..8d12b41b3b19 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -31,11 +31,13 @@
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>
#include <linux/cpumask.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.6.0.1"
+#define DRV_VER "10.6.0.2"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -314,7 +316,6 @@ struct be_rx_obj {
} ____cacheline_aligned_in_smp;
struct be_drv_stats {
- u32 be_on_die_temperature;
u32 eth_red_drops;
u32 dma_map_errors;
u32 rx_drops_no_pbuf;
@@ -366,6 +367,7 @@ struct be_vf_cfg {
u32 tx_rate;
u32 plink_tracking;
u32 privileges;
+ bool spoofchk;
};
enum vf_state {
@@ -382,6 +384,7 @@ enum vf_state {
#define BE_FLAGS_SETUP_DONE BIT(9)
#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10)
#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
+#define BE_FLAGS_OS2BMC BIT(12)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
@@ -426,6 +429,8 @@ struct be_resources {
u32 vf_if_cap_flags; /* VF if capability flags */
};
+#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
+
struct rss_info {
u64 rss_flags;
u8 rsstable[RSS_INDIR_TABLE_LEN];
@@ -433,6 +438,12 @@ struct rss_info {
u8 rss_hkey[RSS_HASH_KEY_LEN];
};
+#define BE_INVALID_DIE_TEMP 0xFF
+struct be_hwmon {
+ struct device *hwmon_dev;
+ u8 be_on_die_temp; /* Unit: millidegree Celsius */
+};
+
/* Macros to read/write the 'features' word of be_wrb_params structure.
*/
#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT
@@ -453,7 +464,8 @@ enum {
BE_WRB_F_LSO_BIT, /* LSO */
BE_WRB_F_LSO6_BIT, /* LSO6 */
BE_WRB_F_VLAN_BIT, /* VLAN */
- BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */
+ BE_WRB_F_VLAN_SKIP_HW_BIT, /* Skip VLAN tag (workaround) */
+ BE_WRB_F_OS2BMC_BIT /* Send packet to the management ring */
};
/* The structure below provides a HW-agnostic abstraction of WRB params
@@ -514,6 +526,7 @@ struct be_adapter {
u16 work_counter;
struct delayed_work be_err_detection_work;
+ u8 err_flags;
u32 flags;
u32 cmd_privileges;
/* Ethtool knobs and info */
@@ -572,8 +585,11 @@ struct be_adapter {
u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
+ struct be_hwmon hwmon_info;
u8 pf_number;
struct rss_info rss_info;
+ /* Filters for packets that need to be sent to BMC */
+ u32 bmc_filt_mask;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -772,26 +788,36 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
-static inline bool be_multi_rxq(const struct be_adapter *adapter)
+#define BE_ERROR_EEH 1
+#define BE_ERROR_UE BIT(1)
+#define BE_ERROR_FW BIT(2)
+#define BE_ERROR_HW (BE_ERROR_EEH | BE_ERROR_UE)
+#define BE_ERROR_ANY (BE_ERROR_EEH | BE_ERROR_UE | BE_ERROR_FW)
+#define BE_CLEAR_ALL 0xFF
+
+static inline u8 be_check_error(struct be_adapter *adapter, u32 err_type)
{
- return adapter->num_rx_qs > 1;
+ return (adapter->err_flags & err_type);
}
-static inline bool be_error(struct be_adapter *adapter)
+static inline void be_set_error(struct be_adapter *adapter, int err_type)
{
- return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->err_flags |= err_type;
+ netif_carrier_off(netdev);
+
+ dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
}
-static inline bool be_hw_error(struct be_adapter *adapter)
+static inline void be_clear_error(struct be_adapter *adapter, int err_type)
{
- return adapter->eeh_error || adapter->hw_error;
+ adapter->err_flags &= ~err_type;
}
-static inline void be_clear_all_error(struct be_adapter *adapter)
+static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
- adapter->eeh_error = false;
- adapter->hw_error = false;
- adapter->fw_timeout = false;
+ return adapter->num_rx_qs > 1;
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
@@ -804,6 +830,7 @@ bool be_pause_supported(struct be_adapter *adapter);
u32 be_get_fw_log_level(struct be_adapter *adapter);
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
+void be_eqd_update(struct be_adapter *adapter, bool force_update);
/*
* internal function to initialize-cleanup roce device.
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index c5e1d0ac75f9..9eac3227d2ca 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -93,7 +93,7 @@ static void be_mcc_notify(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
- if (be_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_ANY))
return;
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
@@ -140,6 +140,7 @@ static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
if (base_status == MCC_STATUS_NOT_SUPPORTED ||
base_status == MCC_STATUS_ILLEGAL_REQUEST ||
addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+ addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
(opcode == OPCODE_COMMON_WRITE_FLASHROM &&
(base_status == MCC_STATUS_ILLEGAL_FIELD ||
addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
@@ -191,10 +192,12 @@ static void be_async_cmd_process(struct be_adapter *adapter,
if (base_status == MCC_STATUS_SUCCESS) {
struct be_cmd_resp_get_cntl_addnl_attribs *resp =
(void *)resp_hdr;
- adapter->drv_stats.be_on_die_temperature =
+ adapter->hwmon_info.be_on_die_temp =
resp->on_die_temperature;
} else {
adapter->be_get_temp_freq = 0;
+ adapter->hwmon_info.be_on_die_temp =
+ BE_INVALID_DIE_TEMP;
}
return;
}
@@ -330,6 +333,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
}
}
+#define MGMT_ENABLE_MASK 0x4
+static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
+ u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);
+
+ if (evt_dw1 & MGMT_ENABLE_MASK) {
+ adapter->flags |= BE_FLAGS_OS2BMC;
+ adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
+ } else {
+ adapter->flags &= ~BE_FLAGS_OS2BMC;
+ }
+}
+
static void be_async_grp5_evt_process(struct be_adapter *adapter,
struct be_mcc_compl *compl)
{
@@ -346,6 +364,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
case ASYNC_EVENT_PVID_STATE:
be_async_grp5_pvid_state_process(adapter, compl);
break;
+ /* Async event to disable/enable os2bmc and/or mac-learning */
+ case ASYNC_EVENT_FW_CONTROL:
+ be_async_grp5_fw_control_process(adapter, compl);
+ break;
default:
break;
}
@@ -486,7 +508,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
for (i = 0; i < mcc_timeout; i++) {
- if (be_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_ANY))
return -EIO;
local_bh_disable();
@@ -499,7 +521,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
- adapter->fw_timeout = true;
+ be_set_error(adapter, BE_ERROR_FW);
return -EIO;
}
return status;
@@ -538,7 +560,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
u32 ready;
do {
- if (be_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_ANY))
return -EIO;
ready = ioread32(db);
@@ -551,7 +573,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
- adapter->fw_timeout = true;
+ be_set_error(adapter, BE_ERROR_FW);
be_detect_error(adapter);
return -1;
}
@@ -1457,7 +1479,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
*if_handle = le32_to_cpu(resp->interface_id);
/* Hack to retrieve VF's pmac-id on BE3 */
- if (BE3_chip(adapter) && !be_physfn(adapter))
+ if (BE3_chip(adapter) && be_virtfn(adapter))
adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
}
return status;
@@ -3156,7 +3178,7 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
}
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id, u16 hsw_mode)
+ u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_hsw_config *req;
@@ -3192,6 +3214,14 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
ctxt, hsw_mode);
}
+ /* Enable/disable both mac and vlan spoof checking */
+ if (!BEx_chip(adapter) && spoofchk) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
+ ctxt, spoofchk);
+ AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
+ ctxt, spoofchk);
+ }
+
be_dws_cpu_to_le(req->context, sizeof(req->context));
status = be_mcc_notify_wait(adapter);
@@ -3202,7 +3232,7 @@ err:
/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id, u8 *mode)
+ u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_hsw_config *req;
@@ -3250,6 +3280,10 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
if (mode)
*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
port_fwd_type, &resp->context);
+ if (spoofchk)
+ *spoofchk =
+ AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ spoofchk, &resp->context);
}
err:
@@ -3261,7 +3295,7 @@ static bool be_is_wol_excluded(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- if (!be_physfn(adapter))
+ if (be_virtfn(adapter))
return true;
switch (pdev->subsystem_device) {
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1ec22300e254..2716e6f30d9a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -65,7 +65,8 @@ enum mcc_base_status {
enum mcc_addl_status {
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
- MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+ MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
+ MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab
};
#define CQE_BASE_STATUS_MASK 0xFFFF
@@ -104,6 +105,7 @@ struct be_mcc_compl {
#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
#define ASYNC_EVENT_CODE_SLIPORT 0x11
#define ASYNC_EVENT_PORT_MISCONFIG 0x9
+#define ASYNC_EVENT_FW_CONTROL 0x5
enum {
LINK_DOWN = 0x0,
@@ -180,6 +182,22 @@ struct be_async_event_misconfig_port {
u32 flags;
} __packed;
+#define BMC_FILT_BROADCAST_ARP BIT(0)
+#define BMC_FILT_BROADCAST_DHCP_CLIENT BIT(1)
+#define BMC_FILT_BROADCAST_DHCP_SERVER BIT(2)
+#define BMC_FILT_BROADCAST_NET_BIOS BIT(3)
+#define BMC_FILT_BROADCAST BIT(7)
+#define BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER BIT(8)
+#define BMC_FILT_MULTICAST_IPV6_RA BIT(9)
+#define BMC_FILT_MULTICAST_IPV6_RAS BIT(10)
+#define BMC_FILT_MULTICAST BIT(15)
+struct be_async_fw_control {
+ u32 event_data_word1;
+ u32 event_data_word2;
+ u32 evt_tag;
+ u32 event_data_word4;
+} __packed;
+
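The BMC_FILT_* bits above arrive packed in event_data_word2 of the fw_control async event and are decoded by be_async_grp5_fw_control_process() in be_cmds.c. A self-contained decoder sketch; the bit positions are taken from the defines above, the sample mask is made up:

    /* Illustrative only: decode a bmc_filt_mask word. */
    #include <stdio.h>
    #include <stdint.h>

    static const struct { uint32_t bit; const char *name; } bmc_filts[] = {
            { 1u << 0,  "BROADCAST_ARP" },
            { 1u << 1,  "BROADCAST_DHCP_CLIENT" },
            { 1u << 2,  "BROADCAST_DHCP_SERVER" },
            { 1u << 3,  "BROADCAST_NET_BIOS" },
            { 1u << 7,  "BROADCAST (all)" },
            { 1u << 8,  "MULTICAST_IPV6_NEIGH_ADVER" },
            { 1u << 9,  "MULTICAST_IPV6_RA" },
            { 1u << 10, "MULTICAST_IPV6_RAS" },
            { 1u << 15, "MULTICAST (all)" },
    };

    int main(void)
    {
            uint32_t mask = (1u << 0) | (1u << 9);  /* sample: ARP + IPv6 RA */
            size_t i;

            for (i = 0; i < sizeof(bmc_filts) / sizeof(bmc_filts[0]); i++)
                    if (mask & bmc_filts[i].bit)
                            printf("filtered to BMC: %s\n", bmc_filts[i].name);
            return 0;
    }
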
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@@ -1109,10 +1127,6 @@ struct be_cmd_req_query_fw_cfg {
u32 rsvd[31];
};
-/* ASIC revisions */
-#define ASIC_REV_B0 0x10
-#define ASIC_REV_P2 0x11
-
struct be_cmd_resp_query_fw_cfg {
struct be_cmd_resp_hdr hdr;
u32 be_config_number;
@@ -1745,18 +1759,24 @@ struct be_cmd_req_set_mac_list {
#define PORT_FWD_TYPE_VEPA 0x3
#define PORT_FWD_TYPE_VEB 0x2
+#define ENABLE_MAC_SPOOFCHK 0x2
+#define DISABLE_MAC_SPOOFCHK 0x3
+
struct amap_set_hsw_context {
u8 interface_id[16];
- u8 rsvd0[14];
+ u8 rsvd0[8];
+ u8 mac_spoofchk[2];
+ u8 rsvd1[4];
u8 pvid_valid;
u8 pport;
- u8 rsvd1[6];
+ u8 rsvd2[6];
u8 port_fwd_type[3];
- u8 rsvd2[7];
+ u8 rsvd3[5];
+ u8 vlan_spoofchk[2];
u8 pvid[16];
- u8 rsvd3[32];
u8 rsvd4[32];
u8 rsvd5[32];
+ u8 rsvd6[32];
} __packed;
struct be_cmd_req_set_hsw_config {
@@ -1774,11 +1794,13 @@ struct amap_get_hsw_req_context {
struct amap_get_hsw_resp_context {
u8 rsvd0[6];
u8 port_fwd_type[3];
- u8 rsvd1[7];
+ u8 rsvd1[5];
+ u8 spoofchk;
+ u8 rsvd2;
u8 pvid[16];
- u8 rsvd2[32];
u8 rsvd3[32];
u8 rsvd4[32];
+ u8 rsvd5[32];
} __packed;
struct be_cmd_req_get_hsw_config {
@@ -2334,9 +2356,9 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
u32 domain);
int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
- u16 intf_id, u16 hsw_mode);
+ u16 intf_id, u16 hsw_mode, u8 spoofchk);
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
- u16 intf_id, u8 *mode);
+ u16 intf_id, u8 *mode, bool *spoofchk);
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
int be_cmd_get_fw_log_level(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2835dee5dc39..b2476dbfd103 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -123,7 +123,6 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(dma_map_errors)},
/* Number of packets dropped due to random early drop function */
{DRVSTAT_INFO(eth_red_drops)},
- {DRVSTAT_INFO(be_on_die_temperature)},
{DRVSTAT_INFO(rx_roce_bytes_lsd)},
{DRVSTAT_INFO(rx_roce_bytes_msd)},
{DRVSTAT_INFO(rx_roce_frames)},
@@ -368,6 +367,14 @@ static int be_set_coalesce(struct net_device *netdev,
aic++;
}
+ /* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
+	 * When AIC is disabled, force-set the EQD value via the FW cmd,
+	 * so that we don't have to calculate the delay multiplier
+	 * encoding value each time EQ_DB is rung.
+ */
+ if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
+ be_eqd_update(adapter, true);
+
return 0;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 48840889db62..c684bb32b487 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -132,6 +132,18 @@
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
+/* Rearm to interrupt delay encoding */
+#define DB_EQ_R2I_DLY_SHIFT (30) /* bits 30 - 31 */
+
+/* Rearm to interrupt (R2I) delay multiplier encoding represents 3 different
+ * values configured in CEV_REARM2IRPT_DLY_MULT_CSR register. This value is
+ * programmed by the host driver while ringing an EQ doorbell (EQ_DB) if a delay
+ * between rearming the EQ and next interrupt on this EQ is desired.
+ */
+#define R2I_DLY_ENC_0 0 /* No delay */
+#define R2I_DLY_ENC_1 1 /* maps to 160us EQ delay */
+#define R2I_DLY_ENC_2 2 /* maps to 96us EQ delay */
+#define R2I_DLY_ENC_3 3 /* maps to 48us EQ delay */
/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 0x120
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e43cc8a73ea7..c0f34845cf59 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -179,7 +179,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
if (lancer_chip(adapter))
return;
- if (adapter->eeh_error)
+ if (be_check_error(adapter, BE_ERROR_EEH))
return;
status = be_cmd_intr_set(adapter, enable);
@@ -191,6 +191,9 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
u32 val = 0;
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
val |= qid & DB_RQ_RING_ID_MASK;
val |= posted << DB_RQ_NUM_POSTED_SHIFT;
@@ -203,6 +206,9 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
{
u32 val = 0;
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
val |= txo->q.id & DB_TXULP_RING_ID_MASK;
val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
@@ -211,14 +217,15 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- bool arm, bool clear_int, u16 num_popped)
+ bool arm, bool clear_int, u16 num_popped,
+ u32 eq_delay_mult_enc)
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_error)
+ if (be_check_error(adapter, BE_ERROR_HW))
return;
if (arm)
@@ -227,6 +234,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
val |= 1 << DB_EQ_CLR_SHIFT;
val |= 1 << DB_EQ_EVNT_SHIFT;
val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+ val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
@@ -238,7 +246,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_error)
+ if (be_check_error(adapter, BE_ERROR_HW))
return;
if (arm)
@@ -662,6 +670,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
netif_carrier_on(netdev);
else
netif_carrier_off(netdev);
+
+ netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -810,6 +820,8 @@ static void wrb_fill_hdr(struct be_adapter *adapter,
SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
+ SET_TX_WRB_HDR_BITS(mgmt, hdr,
+ BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -1146,6 +1158,130 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
txo->pend_wrb_cnt = 0;
}
+/* OS2BMC related */
+
+#define DHCP_CLIENT_PORT 68
+#define DHCP_SERVER_PORT 67
+#define NET_BIOS_PORT1 137
+#define NET_BIOS_PORT2 138
+#define DHCPV6_RAS_PORT 547
+
+#define is_mc_allowed_on_bmc(adapter, eh) \
+ (!is_multicast_filt_enabled(adapter) && \
+ is_multicast_ether_addr(eh->h_dest) && \
+ !is_broadcast_ether_addr(eh->h_dest))
+
+#define is_bc_allowed_on_bmc(adapter, eh) \
+ (!is_broadcast_filt_enabled(adapter) && \
+ is_broadcast_ether_addr(eh->h_dest))
+
+#define is_arp_allowed_on_bmc(adapter, skb) \
+ (is_arp(skb) && is_arp_filt_enabled(adapter))
+
+#define is_broadcast_packet(eh, adapter) \
+ (is_multicast_ether_addr(eh->h_dest) && \
+ !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
+
+#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
+
+#define is_arp_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
+
+#define is_dhcp_client_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
+
+#define is_dhcp_srvr_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
+
+#define is_nbios_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
+
+#define is_ipv6_na_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & \
+ BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
+
+#define is_ipv6_ra_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
+
+#define is_ipv6_ras_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
+
+#define is_broadcast_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
+
+#define is_multicast_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
+
+static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
+ struct sk_buff **skb)
+{
+ struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
+ bool os2bmc = false;
+
+ if (!be_is_os2bmc_enabled(adapter))
+ goto done;
+
+ if (!is_multicast_ether_addr(eh->h_dest))
+ goto done;
+
+ if (is_mc_allowed_on_bmc(adapter, eh) ||
+ is_bc_allowed_on_bmc(adapter, eh) ||
+ is_arp_allowed_on_bmc(adapter, (*skb))) {
+ os2bmc = true;
+ goto done;
+ }
+
+ if ((*skb)->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *hdr = ipv6_hdr((*skb));
+ u8 nexthdr = hdr->nexthdr;
+
+ if (nexthdr == IPPROTO_ICMPV6) {
+ struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
+
+ switch (icmp6->icmp6_type) {
+ case NDISC_ROUTER_ADVERTISEMENT:
+ os2bmc = is_ipv6_ra_filt_enabled(adapter);
+ goto done;
+ case NDISC_NEIGHBOUR_ADVERTISEMENT:
+ os2bmc = is_ipv6_na_filt_enabled(adapter);
+ goto done;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (is_udp_pkt((*skb))) {
+ struct udphdr *udp = udp_hdr((*skb));
+
+ switch (udp->dest) {
+ case DHCP_CLIENT_PORT:
+ os2bmc = is_dhcp_client_filt_enabled(adapter);
+ goto done;
+ case DHCP_SERVER_PORT:
+ os2bmc = is_dhcp_srvr_filt_enabled(adapter);
+ goto done;
+ case NET_BIOS_PORT1:
+ case NET_BIOS_PORT2:
+ os2bmc = is_nbios_filt_enabled(adapter);
+ goto done;
+ case DHCPV6_RAS_PORT:
+ os2bmc = is_ipv6_ras_filt_enabled(adapter);
+ goto done;
+ default:
+ break;
+ }
+ }
+done:
+	/* For VLAN packets destined to the BMC, the ASIC expects
+	 * the VLAN tag to be inline in the packet.
+	 */
+ if (os2bmc)
+ *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+
+ return os2bmc;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1167,6 +1303,18 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
goto drop;
}
+	/* If OS2BMC is enabled and the packet is destined to the BMC,
+	 * enqueue the packet a second time with the mgmt bit set.
+	 */
+ if (be_send_pkt_to_bmc(adapter, &skb)) {
+ BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
+ if (unlikely(!wrb_cnt))
+ goto drop;
+ else
+ skb_get(skb);
+ }
+
if (be_is_txq_full(txo)) {
netif_stop_subqueue(netdev, q_idx);
tx_stats(txo)->tx_stops++;
@@ -1265,7 +1413,8 @@ static int be_vid_config(struct be_adapter *adapter)
if (status) {
dev_err(dev, "Setting HW VLAN filtering failed\n");
/* Set to VLAN promisc mode as setting VLAN filter failed */
- if (addl_status(status) ==
+ if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
+ addl_status(status) ==
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
return be_set_vlan_promisc(adapter);
} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
@@ -1466,6 +1615,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
+ vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
return 0;
}
@@ -1478,7 +1628,7 @@ static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
int status;
/* Enable Transparent VLAN Tagging */
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
if (status)
return status;
@@ -1507,7 +1657,7 @@ static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
/* Reset Transparent VLAN Tagging. */
status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
- vf_cfg->if_handle, 0);
+ vf_cfg->if_handle, 0, 0);
if (status)
return status;
@@ -1642,6 +1792,39 @@ static int be_set_vf_link_state(struct net_device *netdev, int vf,
return 0;
}
+static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ u8 spoofchk;
+ int status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ if (BEx_chip(adapter))
+ return -EOPNOTSUPP;
+
+ if (enable == vf_cfg->spoofchk)
+ return 0;
+
+ spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
+
+ status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
+ 0, spoofchk);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Spoofchk change on VF %d failed: %#x\n", vf, status);
+ return be_cmd_status(status);
+ }
+
+ vf_cfg->spoofchk = enable;
+ return 0;
+}
+
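With be_set_vf_spoofchk() wired into be_netdev_ops (the .ndo_set_vf_spoofchk entry added further down), the setting should become reachable from iproute2, assuming a userspace new enough to expose the VF spoofchk attribute. A typical invocation with a PF named eth0 and at least one VF:

    ip link set dev eth0 vf 0 spoofchk on
    ip link show dev eth0        # the VF 0 line now reports "spoof checking on"
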
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
ulong now)
{
@@ -1650,61 +1833,110 @@ static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
aic->jiffies = now;
}
-static void be_eqd_update(struct be_adapter *adapter)
+static int be_get_new_eqd(struct be_eq_obj *eqo)
{
- struct be_set_eqd set_eqd[MAX_EVT_QS];
- int eqd, i, num = 0, start;
+ struct be_adapter *adapter = eqo->adapter;
+ int eqd, start;
struct be_aic_obj *aic;
- struct be_eq_obj *eqo;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
- u64 rx_pkts, tx_pkts;
+ u64 rx_pkts = 0, tx_pkts = 0;
ulong now;
u32 pps, delta;
+ int i;
- for_all_evt_queues(adapter, eqo, i) {
- aic = &adapter->aic_obj[eqo->idx];
- if (!aic->enable) {
- if (aic->jiffies)
- aic->jiffies = 0;
- eqd = aic->et_eqd;
- goto modify_eqd;
- }
+ aic = &adapter->aic_obj[eqo->idx];
+ if (!aic->enable) {
+ if (aic->jiffies)
+ aic->jiffies = 0;
+ eqd = aic->et_eqd;
+ return eqd;
+ }
- rxo = &adapter->rx_obj[eqo->idx];
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
do {
start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
- rx_pkts = rxo->stats.rx_pkts;
+ rx_pkts += rxo->stats.rx_pkts;
} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
+ }
- txo = &adapter->tx_obj[eqo->idx];
+ for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
do {
start = u64_stats_fetch_begin_irq(&txo->stats.sync);
- tx_pkts = txo->stats.tx_reqs;
+ tx_pkts += txo->stats.tx_reqs;
} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
+ }
- /* Skip, if wrapped around or first calculation */
- now = jiffies;
- if (!aic->jiffies || time_before(now, aic->jiffies) ||
- rx_pkts < aic->rx_pkts_prev ||
- tx_pkts < aic->tx_reqs_prev) {
- be_aic_update(aic, rx_pkts, tx_pkts, now);
- continue;
- }
+ /* Skip, if wrapped around or first calculation */
+ now = jiffies;
+ if (!aic->jiffies || time_before(now, aic->jiffies) ||
+ rx_pkts < aic->rx_pkts_prev ||
+ tx_pkts < aic->tx_reqs_prev) {
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+ return aic->prev_eqd;
+ }
- delta = jiffies_to_msecs(now - aic->jiffies);
- pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
- (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
- eqd = (pps / 15000) << 2;
+ delta = jiffies_to_msecs(now - aic->jiffies);
+ if (delta == 0)
+ return aic->prev_eqd;
- if (eqd < 8)
- eqd = 0;
- eqd = min_t(u32, eqd, aic->max_eqd);
- eqd = max_t(u32, eqd, aic->min_eqd);
+ pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+ (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+ eqd = (pps / 15000) << 2;
- be_aic_update(aic, rx_pkts, tx_pkts, now);
-modify_eqd:
- if (eqd != aic->prev_eqd) {
+ if (eqd < 8)
+ eqd = 0;
+ eqd = min_t(u32, eqd, aic->max_eqd);
+ eqd = max_t(u32, eqd, aic->min_eqd);
+
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+
+ return eqd;
+}
+
+/* For Skyhawk-R only */
+static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
+{
+ struct be_adapter *adapter = eqo->adapter;
+ struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
+ ulong now = jiffies;
+ int eqd;
+ u32 mult_enc;
+
+ if (!aic->enable)
+ return 0;
+
+ if (time_before_eq(now, aic->jiffies) ||
+ jiffies_to_msecs(now - aic->jiffies) < 1)
+ eqd = aic->prev_eqd;
+ else
+ eqd = be_get_new_eqd(eqo);
+
+ if (eqd > 100)
+ mult_enc = R2I_DLY_ENC_1;
+ else if (eqd > 60)
+ mult_enc = R2I_DLY_ENC_2;
+ else if (eqd > 20)
+ mult_enc = R2I_DLY_ENC_3;
+ else
+ mult_enc = R2I_DLY_ENC_0;
+
+ aic->prev_eqd = eqd;
+
+ return mult_enc;
+}
+
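The mapping above from a computed EQD to an R2I delay encoding is pure threshold logic; a self-contained sketch, with the thresholds and R2I_DLY_ENC_* values taken from the hunks above and arbitrary sample EQD values:

    #include <stdio.h>

    enum { R2I_DLY_ENC_0, R2I_DLY_ENC_1, R2I_DLY_ENC_2, R2I_DLY_ENC_3 };

    static int eqd_to_mult_enc(int eqd)
    {
            if (eqd > 100)
                    return R2I_DLY_ENC_1;   /* 160us EQ delay */
            else if (eqd > 60)
                    return R2I_DLY_ENC_2;   /* 96us EQ delay */
            else if (eqd > 20)
                    return R2I_DLY_ENC_3;   /* 48us EQ delay */
            return R2I_DLY_ENC_0;           /* no delay */
    }

    int main(void)
    {
            int samples[] = { 0, 30, 80, 120 };
            size_t i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("eqd=%d -> enc=%d\n",
                           samples[i], eqd_to_mult_enc(samples[i]));
            return 0;
    }
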
+void be_eqd_update(struct be_adapter *adapter, bool force_update)
+{
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
+ struct be_aic_obj *aic;
+ struct be_eq_obj *eqo;
+ int i, num = 0, eqd;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ aic = &adapter->aic_obj[eqo->idx];
+ eqd = be_get_new_eqd(eqo);
+ if (force_update || eqd != aic->prev_eqd) {
set_eqd[num].delay_multiplier = (eqd * 65)/100;
set_eqd[num].eq_id = eqo->q.id;
aic->prev_eqd = eqd;
@@ -2212,7 +2444,7 @@ static void be_eq_clean(struct be_eq_obj *eqo)
{
int num = events_get(eqo);
- be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
static void be_rx_cq_clean(struct be_rx_obj *rxo)
@@ -2236,7 +2468,9 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
if (lancer_chip(adapter))
break;
- if (flush_wait++ > 10 || be_hw_error(adapter)) {
+ if (flush_wait++ > 50 ||
+ be_check_error(adapter,
+ BE_ERROR_HW)) {
dev_warn(&adapter->pdev->dev,
"did not receive flush compl\n");
break;
@@ -2297,7 +2531,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
pending_txqs--;
}
- if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
+ if (pending_txqs == 0 || ++timeo > 10 ||
+ be_check_error(adapter, BE_ERROR_HW))
break;
mdelay(1);
@@ -2573,7 +2808,7 @@ static irqreturn_t be_intx(int irq, void *dev)
if (num_evts)
eqo->spurious_intr = 0;
}
- be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
+ be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
/* Return IRQ_HANDLED only for the first spurious intr
* after a valid intr to stop the kernel from branding
@@ -2589,7 +2824,7 @@ static irqreturn_t be_msix(int irq, void *dev)
{
struct be_eq_obj *eqo = dev;
- be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
napi_schedule(&eqo->napi);
return IRQ_HANDLED;
}
@@ -2838,6 +3073,7 @@ int be_poll(struct napi_struct *napi, int budget)
int max_work = 0, work, i, num_evts;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
+ u32 mult_enc = 0;
num_evts = events_get(eqo);
@@ -2863,10 +3099,18 @@ int be_poll(struct napi_struct *napi, int budget)
if (max_work < budget) {
napi_complete(napi);
- be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
+
+ /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
+ * delay via a delay multiplier encoding value
+ */
+ if (skyhawk_chip(adapter))
+ mult_enc = be_get_eq_delay_mult_enc(eqo);
+
+ be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
+ mult_enc);
} else {
/* As we'll continue in polling mode, count and clear events */
- be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
+ be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
}
return max_work;
}
@@ -2898,22 +3142,19 @@ void be_detect_error(struct be_adapter *adapter)
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- bool error_detected = false;
struct device *dev = &adapter->pdev->dev;
- struct net_device *netdev = adapter->netdev;
- if (be_hw_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_HW))
return;
if (lancer_chip(adapter)) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ be_set_error(adapter, BE_ERROR_UE);
sliport_err1 = ioread32(adapter->db +
SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
SLIPORT_ERROR2_OFFSET);
- adapter->hw_error = true;
- error_detected = true;
/* Do not log error messages if it's a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
@@ -2945,12 +3186,12 @@ void be_detect_error(struct be_adapter *adapter)
*/
if (ue_lo || ue_hi) {
- error_detected = true;
dev_err(dev,
"Unrecoverable Error detected in the adapter");
dev_err(dev, "Please reboot server to recover");
if (skyhawk_chip(adapter))
- adapter->hw_error = true;
+ be_set_error(adapter, BE_ERROR_UE);
+
for (i = 0; ue_lo; ue_lo >>= 1, i++) {
if (ue_lo & 1)
dev_err(dev, "UE: %s bit set\n",
@@ -2963,8 +3204,6 @@ void be_detect_error(struct be_adapter *adapter)
}
}
}
- if (error_detected)
- netif_carrier_off(netdev);
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -3015,7 +3254,7 @@ fail:
dev_warn(dev, "MSIx enable failed\n");
/* INTx is not supported in VFs, so fail probe if enable_msix fails */
- if (!be_physfn(adapter))
+ if (be_virtfn(adapter))
return num_vec;
return 0;
}
@@ -3062,7 +3301,7 @@ static int be_irq_register(struct be_adapter *adapter)
if (status == 0)
goto done;
/* INTx is not supported for VF */
- if (!be_physfn(adapter))
+ if (be_virtfn(adapter))
return status;
}
@@ -3229,9 +3468,12 @@ static int be_rx_qs_create(struct be_adapter *adapter)
memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
- /* First time posting */
+ /* Post 1 less than RXQ-len to avoid head being equal to tail,
+ * which is a queue empty condition
+ */
for_all_rx_queues(adapter, rxo, i)
- be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
+ be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
+
return 0;
}
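The RX_Q_LEN - 1 posting above is the usual one-empty-slot ring convention: when head == tail is reserved to mean "empty", an N-slot ring can hold at most N - 1 buffers. A toy demonstration:

    #include <stdio.h>
    #define N 8     /* stand-in for RX_Q_LEN */

    int main(void)
    {
            unsigned int head = 0, tail = 0, used = 0;

            /* fill until advancing head would make it equal tail again */
            while ((head + 1) % N != tail) {
                    head = (head + 1) % N;
                    used++;
            }
            printf("capacity with head==tail meaning empty: %u of %u\n",
                   used, N);
            return 0;
    }
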
@@ -3263,7 +3505,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
be_enable_busy_poll(eqo);
- be_eq_notify(adapter, eqo->q.id, true, true, 0);
+ be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -3563,7 +3805,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
/* If a FW profile exists, then cap_flags are updated */
cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
@@ -3610,6 +3852,7 @@ static int be_vf_setup(struct be_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
+ bool spoofchk;
old_vfs = pci_num_vf(adapter->pdev);
@@ -3657,6 +3900,12 @@ static int be_vf_setup(struct be_adapter *adapter)
if (!old_vfs)
be_cmd_config_qos(adapter, 0, 0, vf + 1);
+ status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
+ vf_cfg->if_handle, NULL,
+ &spoofchk);
+ if (!status)
+ vf_cfg->spoofchk = spoofchk;
+
if (!old_vfs) {
be_cmd_enable_vf(adapter, vf + 1);
be_cmd_set_logical_link_config(adapter,
@@ -3733,8 +3982,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
* *only* if it is RSS-capable.
*/
if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
- !be_physfn(adapter) || (be_is_mc(adapter) &&
- !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
+ be_virtfn(adapter) ||
+ (be_is_mc(adapter) &&
+ !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
res->max_tx_qs = 1;
} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
struct be_resources super_nic_res = {0};
@@ -4075,7 +4325,7 @@ static int be_func_init(struct be_adapter *adapter)
msleep(100);
/* We can clear all errors when function reset succeeds */
- be_clear_all_error(adapter);
+ be_clear_error(adapter, BE_CLEAR_ALL);
}
/* Tell FW we're ready to fire cmds */
@@ -4182,7 +4432,7 @@ static void be_netpoll(struct net_device *netdev)
int i;
for_all_evt_queues(adapter, eqo, i) {
- be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
napi_schedule(&eqo->napi);
}
}
@@ -4666,14 +4916,11 @@ static int lancer_fw_download(struct be_adapter *adapter,
return 0;
}
-#define BE2_UFI 2
-#define BE3_UFI 3
-#define BE3R_UFI 10
-#define SH_UFI 4
-#define SH_P2_UFI 11
-
-static int be_get_ufi_type(struct be_adapter *adapter,
- struct flash_file_hdr_g3 *fhdr)
+/* Check if the flash image file is compatible with the adapter that
+ * is being flashed.
+ */
+static bool be_check_ufi_compatibility(struct be_adapter *adapter,
+ struct flash_file_hdr_g3 *fhdr)
{
if (!fhdr) {
dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
@@ -4685,43 +4932,22 @@ static int be_get_ufi_type(struct be_adapter *adapter,
*/
switch (fhdr->build[0]) {
case BLD_STR_UFI_TYPE_SH:
- return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
- SH_UFI;
+ if (!skyhawk_chip(adapter))
+ return false;
+ break;
case BLD_STR_UFI_TYPE_BE3:
- return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
- BE3_UFI;
+ if (!BE3_chip(adapter))
+ return false;
+ break;
case BLD_STR_UFI_TYPE_BE2:
- return BE2_UFI;
- default:
- return -1;
- }
-}
-
-/* Check if the flash image file is compatible with the adapter that
- * is being flashed.
- * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
- * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
- */
-static bool be_check_ufi_compatibility(struct be_adapter *adapter,
- struct flash_file_hdr_g3 *fhdr)
-{
- int ufi_type = be_get_ufi_type(adapter, fhdr);
-
- switch (ufi_type) {
- case SH_P2_UFI:
- return skyhawk_chip(adapter);
- case SH_UFI:
- return (skyhawk_chip(adapter) &&
- adapter->asic_rev < ASIC_REV_P2);
- case BE3R_UFI:
- return BE3_chip(adapter);
- case BE3_UFI:
- return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
- case BE2_UFI:
- return BE2_chip(adapter);
+ if (!BE2_chip(adapter))
+ return false;
+ break;
default:
return false;
}
+
+ return (fhdr->asic_type_rev >= adapter->asic_rev);
}
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
@@ -4829,7 +5055,7 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
adapter->if_handle,
mode == BRIDGE_MODE_VEPA ?
PORT_FWD_TYPE_VEPA :
- PORT_FWD_TYPE_VEB);
+ PORT_FWD_TYPE_VEB, 0);
if (status)
goto err;
@@ -4861,7 +5087,8 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
hsw_mode = PORT_FWD_TYPE_VEB;
} else {
status = be_cmd_get_hsw_config(adapter, NULL, 0,
- adapter->if_handle, &hsw_mode);
+ adapter->if_handle, &hsw_mode,
+ NULL);
if (status)
return 0;
}
@@ -5014,6 +5241,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_set_vf_rate = be_set_vf_tx_rate,
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
+ .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
@@ -5118,7 +5346,7 @@ static void be_err_detection_task(struct work_struct *work)
be_detect_error(adapter);
- if (adapter->hw_error) {
+ if (be_check_error(adapter, BE_ERROR_HW)) {
be_cleanup(adapter);
/* As of now error recovery support is in Lancer only */
@@ -5182,7 +5410,9 @@ static void be_worker(struct work_struct *work)
be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
}
- be_eqd_update(adapter);
+ /* EQ-delay update for Skyhawk is done while notifying EQ */
+ if (!skyhawk_chip(adapter))
+ be_eqd_update(adapter, false);
if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
be_log_sfp_info(adapter);
@@ -5202,7 +5432,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
static int db_bar(struct be_adapter *adapter)
{
- if (lancer_chip(adapter) || !be_physfn(adapter))
+ if (lancer_chip(adapter) || be_virtfn(adapter))
return 0;
else
return 4;
@@ -5381,6 +5611,30 @@ static void be_remove(struct pci_dev *pdev)
free_netdev(adapter->netdev);
}
+static ssize_t be_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct be_adapter *adapter = dev_get_drvdata(dev);
+
+ /* Unit: millidegree Celsius */
+ if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
+ return -EIO;
+ else
+ return sprintf(buf, "%u\n",
+ adapter->hwmon_info.be_on_die_temp * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ be_hwmon_show_temp, NULL, 1);
+
+static struct attribute *be_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(be_hwmon);
+
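Once registered, the sensor shows up under sysfs like any other hwmon device. A hypothetical userspace read-back; the hwmon index varies per system, so the hwmon0 path here is an assumption, and the value is in millidegrees Celsius as sprintf'd by be_hwmon_show_temp():

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
            long mdeg;

            if (!f) {
                    perror("temp1_input");
                    return 1;
            }
            if (fscanf(f, "%ld", &mdeg) == 1)
                    printf("on-die temperature: %ld.%03ld C\n",
                           mdeg / 1000, mdeg % 1000);
            fclose(f);
            return 0;
    }
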
static char *mc_name(struct be_adapter *adapter)
{
char *str = ""; /* default */
@@ -5500,6 +5754,16 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
be_schedule_err_detection(adapter);
+	/* On-die temperature is not supported for VFs. */
+ if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
+ adapter->hwmon_info.hwmon_dev =
+ devm_hwmon_device_register_with_groups(&pdev->dev,
+ DRV_NAME,
+ adapter,
+ be_hwmon_groups);
+ adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
+ }
+
dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
func_name(adapter), mc_name(adapter), adapter->port_name);
@@ -5592,8 +5856,8 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
dev_err(&adapter->pdev->dev, "EEH error detected\n");
- if (!adapter->eeh_error) {
- adapter->eeh_error = true;
+ if (!be_check_error(adapter, BE_ERROR_EEH)) {
+ be_set_error(adapter, BE_ERROR_EEH);
be_cancel_err_detection(adapter);
@@ -5640,7 +5904,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
pci_cleanup_aer_uncorrect_error_status(pdev);
- be_clear_all_error(adapter);
+ be_clear_error(adapter, BE_CLEAR_ALL);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 132866433a25..60368207bf58 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index e6f7eb1a7d87..cde6ef905ec4 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 66d47e448e4d..bf4cf3fbb5f2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2118,6 +2118,82 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
+static int fec_enet_get_regs_len(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct resource *r;
+ int s = 0;
+
+ r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
+ if (r)
+ s = resource_size(r);
+
+ return s;
+}
+
+/* List of registers that can safely be read and dumped with ethtool */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+static u32 fec_enet_register_offset[] = {
+ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
+ FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
+ FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
+ FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
+ FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
+ FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
+ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
+ FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
+ FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
+ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+ RMON_T_P_GTE2048, RMON_T_OCTETS,
+ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+ RMON_R_P_GTE2048, RMON_R_OCTETS,
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
+#else
+static u32 fec_enet_register_offset[] = {
+ FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
+ FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
+ FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
+ FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
+ FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
+ FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
+ FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
+ FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
+ FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
+};
+#endif
+
+static void fec_enet_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *regbuf)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ u32 *buf = (u32 *)regbuf;
+ u32 i, off;
+
+ memset(buf, 0, regs->len);
+
+ for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
+ off = fec_enet_register_offset[i] / 4;
+ buf[off] = readl(&theregs[off]);
+ }
+}
+
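These two hooks are what `ethtool -d <dev>` drives. A hedged userspace sketch of the same path via the SIOCETHTOOL ioctl; the device name eth0 is an assumption:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_drvinfo drv = { .cmd = ETHTOOL_GDRVINFO };
            struct ethtool_regs *regs;
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumed name */
            ifr.ifr_data = (char *)&drv;
            if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                    perror("ETHTOOL_GDRVINFO");
                    return 1;
            }

            regs = calloc(1, sizeof(*regs) + drv.regdump_len);
            if (!regs)
                    return 1;
            regs->cmd = ETHTOOL_GREGS;
            regs->len = drv.regdump_len;    /* fec_enet_get_regs_len() result */
            ifr.ifr_data = (char *)regs;
            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                    perror("ETHTOOL_GREGS");
                    return 1;
            }
            printf("dumped %u bytes of FEC registers\n", regs->len);
            return 0;
    }
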
static int fec_enet_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
@@ -2515,6 +2591,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
+ .get_regs_len = fec_enet_get_regs_len,
+ .get_regs = fec_enet_get_regs,
.nway_reset = fec_enet_nway_reset,
.get_link = ethtool_op_get_link,
.get_coalesce = fec_enet_get_coalesce,
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a583d89b13c4..a15663ad7f5e 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -353,6 +353,7 @@ static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
writel(tmp, fep->hwp + FEC_ATIME_INC);
+ corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
writel(corr_period, fep->hwp + FEC_ATIME_CORR);
/* dummy read to update the timer. */
timecounter_read(&fep->tc);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4ee080d49bc0..ff875028fdff 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -516,6 +516,15 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
return &dev->stats;
}
+static int gfar_set_mac_addr(struct net_device *dev, void *p)
+{
+ eth_mac_addr(dev, p);
+
+ gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+ return 0;
+}
+
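IFF_LIVE_ADDR_CHANGE (set in gfar_probe() below) is what lets this path run while the interface is up. A hypothetical userspace trigger; the device name and MAC here are made up, and the call needs CAP_NET_ADMIN:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <net/if_arp.h>

    int main(void)
    {
            struct ifreq ifr;
            unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumed name */
            ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
            memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);
            if (fd < 0 || ioctl(fd, SIOCSIFHWADDR, &ifr) < 0) {
                    perror("SIOCSIFHWADDR");
                    return 1;
            }
            return 0;
    }
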
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
@@ -526,7 +535,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
@@ -1411,6 +1420,8 @@ static int gfar_probe(struct platform_device *ofdev)
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
gfar_init_addr_hash_table(priv);
/* Insert receive time stamps into padding alignment bytes */
@@ -2254,7 +2265,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rq = 0;
int do_tstamp, do_csum, do_vlan;
u32 bufaddr;
- unsigned long flags;
unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
rq = skb->queue_mapping;
@@ -2434,19 +2444,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(txq, bytes_sent);
- /* We can work in parallel with gfar_clean_tx_ring(), except
- * when modifying num_txbdfree. Note that we didn't grab the lock
- * when we were reading the num_txbdfree and checking for available
- * space, that's because outside of this function it can only grow,
- * and once we've got needed space, it cannot suddenly disappear.
- *
- * The lock also protects us from gfar_error(), which can modify
- * regs->tstat and thus retrigger the transfers, which is why we
- * also must grab the lock before setting ready bit for the first
- * to be transmitted BD.
- */
- spin_lock_irqsave(&tx_queue->txlock, flags);
-
gfar_wmb();
txbdp_start->lstatus = cpu_to_be32(lstatus);
@@ -2463,8 +2460,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ /* We can work in parallel with gfar_clean_tx_ring(), except
+ * when modifying num_txbdfree. Note that we didn't grab the lock
+ * when we were reading the num_txbdfree and checking for available
+ * space, that's because outside of this function it can only grow.
+ */
+ spin_lock_bh(&tx_queue->txlock);
/* reduce TxBD free count */
tx_queue->num_txbdfree -= (nr_txbds);
+ spin_unlock_bh(&tx_queue->txlock);
/* If the next BD still needs to be cleaned up, then the bds
* are full. We need to tell the kernel to stop sending us stuff.
@@ -2478,9 +2482,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Tell the DMA to go go go */
gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
- /* Unlock priv */
- spin_unlock_irqrestore(&tx_queue->txlock, flags);
-
return NETDEV_TX_OK;
dma_map_err:
@@ -2622,7 +2623,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
- unsigned long flags;
frags = skb_shinfo(skb)->nr_frags;
@@ -2686,9 +2686,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
TX_RING_MOD_MASK(tx_ring_size);
howmany++;
- spin_lock_irqsave(&tx_queue->txlock, flags);
+ spin_lock(&tx_queue->txlock);
tx_queue->num_txbdfree += nr_txbds;
- spin_unlock_irqrestore(&tx_queue->txlock, flags);
+ spin_unlock(&tx_queue->txlock);
}
/* If we freed a buffer, we can restart transmission, if necessary */
@@ -3411,21 +3411,12 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
if (events & IEVENT_CRL)
dev->stats.tx_aborted_errors++;
if (events & IEVENT_XFUN) {
- unsigned long flags;
-
netif_dbg(priv, tx_err, dev,
"TX FIFO underrun, packet dropped\n");
dev->stats.tx_dropped++;
atomic64_inc(&priv->extra_stats.tx_underrun);
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- /* Reactivate the Tx Queues */
- gfar_write(&regs->tstat, gfargrp->tstat);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
+ schedule_work(&priv->reset_task);
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 3b39fdddeb57..d49bee38cd31 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -798,7 +798,7 @@ static void hip04_free_ring(struct net_device *ndev, struct device *d)
for (i = 0; i < RX_DESC_NUM; i++)
if (priv->rx_buf[i])
- put_page(virt_to_head_page(priv->rx_buf[i]));
+ skb_free_frag(priv->rx_buf[i]);
for (i = 0; i < TX_DESC_NUM; i++)
if (priv->tx_skb[i])
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 0ffdcd381fdd..a5e077eac99a 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -500,7 +500,6 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
- dev->last_rx = jiffies;
next:
pos = dma_ring_incr(pos, RX_DESC_NUM);
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 18134766a114..29bbb628d712 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool;
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.04"
+#define ibmveth_driver_version "1.05"
MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@@ -100,6 +100,8 @@ struct ibmveth_stat ibmveth_stats[] = {
{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
+ { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
+ { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
};
/* simple methods of getting data from the current rxq entry */
@@ -852,6 +854,10 @@ static int ibmveth_set_features(struct net_device *dev,
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
int rc;
+ netdev_features_t changed = features ^ dev->features;
+
+ if (features & NETIF_F_TSO & changed)
+ netdev_info(dev, "TSO feature requires all partitions to have updated driver");
if (rx_csum == adapter->rx_csum)
return 0;
@@ -1035,6 +1041,15 @@ retry_bounce:
descs[i+1].fields.address = dma_addr;
}
+ if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
+ /* Put -1 in the IP checksum to tell phyp it
+ * is a largesend packet and put the mss in the TCP checksum.
+ */
+ ip_hdr(skb)->check = 0xffff;
+ tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ adapter->tx_large_packets++;
+ }
+
if (ibmveth_send(adapter, descs)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
@@ -1080,6 +1095,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
struct net_device *netdev = adapter->netdev;
int frames_processed = 0;
unsigned long lpar_rc;
+ struct iphdr *iph;
restart_poll:
while (frames_processed < budget) {
@@ -1122,10 +1138,23 @@ restart_poll:
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
- if (csum_good)
+ if (csum_good) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
+ iph = (struct iphdr *)skb->data;
+
+				/* If the IP checksum is not offloaded and the packet
+				 * is a largesend, the header checksum must be rebuilt.
+ */
+ if (iph->check == 0xffff) {
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ adapter->rx_large_packets++;
+ }
+ }
+ }
- netif_receive_skb(skb); /* send it up */
+ napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += length;
@@ -1422,8 +1451,14 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features |= netdev->hw_features;
+ /* TSO is disabled by default */
+ netdev->hw_features |= NETIF_F_TSO;
+
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
+ if (firmware_has_feature(FW_FEATURE_CMO))
+ memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
+
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
int error;
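For reference, the rebuild that ibmveth_poll() performs with ip_fast_csum() above is the standard 16-bit ones'-complement sum over the IPv4 header with the check field zeroed. A minimal userspace version; byte order is glossed over and the sample header is made up:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t ip_hdr_csum(const uint16_t *hdr, int ihl)
    {
            uint32_t sum = 0;
            int i;

            for (i = 0; i < ihl * 2; i++)   /* ihl is in 32-bit words */
                    sum += hdr[i];
            while (sum >> 16)               /* fold carries back in */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* 20-byte IPv4 header; the check word (6th) is zeroed first */
            uint16_t hdr[10] = { 0x4500, 0x0054, 0x0000, 0x4000,
                                 0x4001, 0x0000, 0xc0a8, 0x0001,
                                 0xc0a8, 0x0002 };

            printf("rebuilt check: 0x%04x\n", ip_hdr_csum(hdr, 5));
            return 0;
    }
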
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 1f37499d4398..41dedb1fb2ae 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -104,7 +104,8 @@ static inline long h_illan_attributes(unsigned long unit_address,
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
-static int pool_active[] = { 1, 1, 0, 0, 0};
+static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
+static int pool_active[] = { 1, 1, 0, 0, 1};
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
@@ -160,6 +161,8 @@ struct ibmveth_adapter {
u64 rx_no_buffer;
u64 tx_map_failed;
u64 tx_send_failed;
+ u64 tx_large_packets;
+ u64 rx_large_packets;
};
/*
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 1a450f4b6b12..d2657a412768 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -874,7 +874,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
{
struct cb *cb;
unsigned long flags;
- int err = 0;
+ int err;
spin_lock_irqsave(&nic->cb_lock, flags);
@@ -2922,9 +2922,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- init_timer(&nic->watchdog);
- nic->watchdog.function = e100_watchdog;
- nic->watchdog.data = (unsigned long)nic;
+ setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 983eb4e6f7aa..74dc15055971 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2079,11 +2079,6 @@ static void *e1000_alloc_frag(const struct e1000_adapter *a)
return data;
}
-static void e1000_free_frag(const void *data)
-{
- put_page(virt_to_head_page(data));
-}
-
/**
* e1000_clean_rx_ring - Free Rx Buffers per Queue
* @adapter: board private structure
@@ -2107,7 +2102,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (buffer_info->rxbuf.data) {
- e1000_free_frag(buffer_info->rxbuf.data);
+ skb_free_frag(buffer_info->rxbuf.data);
buffer_info->rxbuf.data = NULL;
}
} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
@@ -4594,28 +4589,28 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
data = e1000_alloc_frag(adapter);
/* Failed allocation, critical failure */
if (!data) {
- e1000_free_frag(olddata);
+ skb_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break;
}
if (!e1000_check_64k_bound(adapter, data, bufsz)) {
/* give up */
- e1000_free_frag(data);
- e1000_free_frag(olddata);
+ skb_free_frag(data);
+ skb_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break;
}
/* Use new allocation */
- e1000_free_frag(olddata);
+ skb_free_frag(olddata);
}
buffer_info->dma = dma_map_single(&pdev->dev,
data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- e1000_free_frag(data);
+ skb_free_frag(data);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
break;
@@ -4637,7 +4632,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
- e1000_free_frag(data);
+ skb_free_frag(data);
buffer_info->rxbuf.data = NULL;
buffer_info->dma = 0;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 08f22f348800..2af603f3e418 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 535a9430976d..a2162e11673e 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index dc79ed85030b..5f7016442ec4 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -2010,7 +2010,7 @@ const struct e1000_info e1000_82573_info = {
.flags2 = FLAG2_DISABLE_ASPM_L1
| FLAG2_DISABLE_ASPM_L0S,
.pba = 20,
- .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_m88,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 2e758f796d60..abc6a9abff98 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 0570c668ec3d..133d4074dbe4 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 0abc942c966e..0b748d1959d9 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,8 @@ struct e1000_info;
#define DEFAULT_RADV 8
#define BURST_RDTR 0x20
#define BURST_RADV 0x20
+#define PCICFG_DESC_RING_STATUS 0xe4
+#define FLUSH_DESC_REQUIRED 0x100
/* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
@@ -384,6 +386,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define INCVALUE_SHIFT_25MHz 18
#define INCPERIOD_25MHz 1
+#define INCVALUE_24MHz 125
+#define INCVALUE_SHIFT_24MHz 14
+#define INCPERIOD_24MHz 3
+
/* Another drawback of scaling the incvalue by a large factor is that the
 * 64-bit SYSTIM register overflows more quickly. This is dealt with
* by simply reading the clock before it overflows.
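The new 24 MHz constants follow the same pattern as the 25 MHz ones: one 24 MHz cycle is not a whole number of nanoseconds, so SYSTIM advances by INCVALUE_24MHz = 125 ns once every INCPERIOD_24MHz = 3 cycles (3 / 24 MHz = 125 ns exactly). A one-line sanity check:

    #include <stdio.h>

    int main(void)
    {
            /* INCPERIOD_24MHz cycles at 24 MHz, expressed in ns */
            printf("%.0f ns\n", 3.0 / 24e6 * 1e9);  /* prints 125 */
            return 0;
    }
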
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 11f486e4ff7b..ad6daa656d3e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1516,8 +1516,19 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
-
+ u32 rctl, fext_nvm11, tarc0;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ tarc0 = er32(TARC(0));
+ /* clear bits 28 & 29 (control of MULR concurrent requests) */
+ tarc0 &= 0xcfffffff;
+ /* set bit 29 (value of MULR requests is now 2) */
+ tarc0 |= 0x20000000;
+ ew32(TARC(0), tarc0);
+ }
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
switch (hw->mac.type) {
@@ -1542,7 +1553,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
+ u32 rctl, fext_nvm11, tarc0;
u16 phy_reg;
rctl = er32(RCTL);
@@ -1550,6 +1561,16 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
switch (hw->mac.type) {
+ case e1000_pch_spt:
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ tarc0 = er32(TARC(0));
+ /* clear bits 28 & 29 (control of MULR concurrent requests) so the
+ * MULR request value reverts to its default of 0
+ */
+ tarc0 &= 0xcfffffff;
+ ew32(TARC(0), tarc0);
+ /* fall through */
case e1000_80003es2lan:
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
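
The TARC(0) writes in the two hunks above use raw masks; bits 29:28 control how many concurrent MULR requests the MAC may issue, and leaving only bit 29 set selects two requests. The same sequence with hypothetical named masks (these defines are not part of the driver):

	#define TARC0_MULR_CTRL_MASK	0x30000000	/* bits 29:28 */
	#define TARC0_MULR_TWO_REQS	0x20000000	/* bit 29: two concurrent requests */

	static void e1000_spt_set_loopback_mulr(struct e1000_hw *hw, bool loopback)
	{
		u32 tarc0 = er32(TARC(0));

		tarc0 &= ~TARC0_MULR_CTRL_MASK;		/* clear bits 28 & 29 */
		if (loopback)
			tarc0 |= TARC0_MULR_TWO_REQS;	/* MULR request value = 2 */
		ew32(TARC(0), tarc0);
	}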
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 19e8c487db06..c9da4654e9ca 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9d81c0317433..b074b9a667b3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1014,8 +1014,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
u16 speed, duplex, scale = 0;
u16 max_snoop, max_nosnoop;
u16 max_ltr_enc; /* max LTR latency encoded */
- s64 lat_ns; /* latency (ns) */
- s64 value;
+ u64 value;
u32 rxa;
if (!hw->adapter->max_frame_size) {
@@ -1040,14 +1039,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
* 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
* 1=2^5ns, 2=2^10ns,...5=2^25ns.
*/
- lat_ns = ((s64)rxa * 1024 -
- (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
- if (lat_ns < 0)
- lat_ns = 0;
- else
- do_div(lat_ns, speed);
+ rxa *= 512;
+ value = (rxa > hw->adapter->max_frame_size) ?
+ (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
+ 0;
- value = lat_ns;
while (value > PCI_LTR_VALUE_MASK) {
scale++;
value = DIV_ROUND_UP(value, (1 << 5));
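
The rewritten expression is the old lat_ns formula with the factor of two folded in: (rxa * 1024 - 2 * max_frame) * 8 * 1000 / speed equals (rxa * 512 - max_frame) * (16000 / speed), and 16000 divides evenly by every speed this path reports (10/100/1000), so hoisting the division is lossless. A standalone check of the algebra, assuming representative values:

	#include <assert.h>

	int main(void)
	{
		long long rxa = 0x20, max_frame = 1522, speed = 1000;

		long long old_ns = (rxa * 1024 - 2 * max_frame) * 8 * 1000 / speed;
		long long new_ns = (rxa * 512 > max_frame) ?
				   (rxa * 512 - max_frame) * (16000 / speed) : 0;

		assert(old_ns == new_ns);	/* both 237792 ns here */
		return 0;
	}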
@@ -1563,7 +1559,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
((adapter->hw.mac.type >= e1000_pch2lan) &&
(!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
- adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+ adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
hw->mac.ops.blink_led = NULL;
}
@@ -5681,7 +5677,7 @@ const struct e1000_info e1000_ich8_info = {
| FLAG_HAS_FLASH
| FLAG_APME_IN_WUC,
.pba = 8,
- .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -5754,7 +5750,7 @@ const struct e1000_info e1000_pch2_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = 9018,
+ .max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -5774,7 +5770,7 @@ const struct e1000_info e1000_pch_lpt_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = 9018,
+ .max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -5794,7 +5790,7 @@ const struct e1000_info e1000_pch_spt_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = 9018,
+ .max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
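
Every frame-size change in this file is the same four bytes: the limits now budget for an 802.1Q tag, so VLAN_HLEN joins the sum and 1518 becomes 1522 while the jumbo cap moves from 9018 to 9022. A standalone check using the standard header sizes:

	#include <assert.h>

	#define ETH_HLEN	14	/* Ethernet header */
	#define ETH_DATA_LEN	1500	/* default MTU */
	#define ETH_FCS_LEN	4	/* frame check sequence */
	#define VLAN_HLEN	4	/* 802.1Q tag */
	#define ETH_FRAME_LEN		(ETH_HLEN + ETH_DATA_LEN)	/* 1514 */
	#define VLAN_ETH_FRAME_LEN	(ETH_FRAME_LEN + VLAN_HLEN)	/* 1518 */

	int main(void)
	{
		assert(ETH_FRAME_LEN + ETH_FCS_LEN == 1518);	  /* old limit */
		assert(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN == 1522); /* new limit */
		assert(9022 - 9018 == VLAN_HLEN);  /* jumbo cap grows the same way */
		return 0;
	}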
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 770a573b9eea..26459853c6be 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -98,8 +98,15 @@
#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
/* bit for disabling packet buffer read */
#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
-
+#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800
+#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000
+#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200
+#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
+
+/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
#define K1_ENTRY_LATENCY 0
#define K1_MIN_TIME 1
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 30b74d590bee..e59d7c283cd4 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 0513d90cdeea..8284618af9ff 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 06edfca1a35e..cc9b3befc2bc 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index a8c27f98f7b0..0b9ea5952b07 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index c509a5c900f5..e62b9dcb91fe 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -48,7 +48,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -3525,22 +3525,30 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
case e1000_pch_lpt:
- case e1000_pch_spt:
- /* On I217, I218 and I219, the clock frequency is 25MHz
- * or 96MHz as indicated by the System Clock Frequency
- * Indication
- */
- if (((hw->mac.type != e1000_pch_lpt) &&
- (hw->mac.type != e1000_pch_spt)) ||
- (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 96MHz frequency */
incperiod = INCPERIOD_96MHz;
incvalue = INCVALUE_96MHz;
shift = INCVALUE_SHIFT_96MHz;
adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ } else {
+ /* Stable 25MHz frequency */
+ incperiod = INCPERIOD_25MHz;
+ incvalue = INCVALUE_25MHz;
+ shift = INCVALUE_SHIFT_25MHz;
+ adapter->cc.shift = shift;
+ }
+ break;
+ case e1000_pch_spt:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
+ /* Stable 24MHz frequency */
+ incperiod = INCPERIOD_24MHz;
+ incvalue = INCVALUE_24MHz;
+ shift = INCVALUE_SHIFT_24MHz;
+ adapter->cc.shift = shift;
break;
}
- /* fall-through */
+ return -EINVAL;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
@@ -3788,6 +3796,108 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
}
/**
+ * e1000_flush_tx_ring - remove all descriptors from the tx_ring
+ * @adapter: board private structure
+ *
+ * We want to clear all pending descriptors from the TX ring.
+ * Zeroing happens when the HW reads the regs. We assign the ring itself as
+ * the data of the next descriptor. We don't care about the data; we are
+ * about to reset the HW.
+ */
+static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc = NULL;
+ u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
+ u16 size = 512;
+
+ tctl = er32(TCTL);
+ ew32(TCTL, tctl | E1000_TCTL_EN);
+ tdt = er32(TDT(0));
+ BUG_ON(tdt != tx_ring->next_to_use);
+ tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
+ tx_desc->buffer_addr = tx_ring->dma;
+
+ tx_desc->lower.data = cpu_to_le32(txd_lower | size);
+ tx_desc->upper.data = 0;
+ /* flush descriptors to memory before notifying the HW */
+ wmb();
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ ew32(TDT(0), tx_ring->next_to_use);
+ mmiowb();
+ usleep_range(200, 250);
+}
+
+/**
+ * e1000_flush_rx_ring - remove all descriptors from the rx_ring
+ * @adapter: board private structure
+ *
+ * Mark all descriptors in the RX ring as consumed and disable the RX ring.
+ */
+static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
+{
+ u32 rctl, rxdctl;
+ struct e1000_hw *hw = &adapter->hw;
+
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(100, 150);
+
+ rxdctl = er32(RXDCTL(0));
+ /* zero the lower 14 bits (prefetch and host thresholds) */
+ rxdctl &= 0xffffc000;
+
+ /* update thresholds: prefetch threshold to 31, host threshold to 1
+ * and make sure the granularity is "descriptors" and not "cache lines"
+ */
+ rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+
+ ew32(RXDCTL(0), rxdctl);
+ /* momentarily enable the RX ring for the changes to take effect */
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(100, 150);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+}
+
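
The RXDCTL write in e1000_flush_rx_ring above packs two threshold fields; the layout here is inferred from the patch's own comments, not from a datasheet: bits 5:0 hold the prefetch threshold, bits 13:8 the host threshold, and bit 24 switches the unit from cache lines to descriptors. A sketch building the same value from named pieces:

	#define E1000_RXDCTL_THRESH_UNIT_DESC	0x01000000	/* bit 24, from ich8lan.h */
	#define RXDCTL_PTHRESH(x)	((x) & 0x3f)		/* bits 5:0 (inferred) */
	#define RXDCTL_HTHRESH(x)	(((x) & 0x3f) << 8)	/* bits 13:8 (inferred) */

	static unsigned int rxdctl_flush_value(unsigned int rxdctl)
	{
		rxdctl &= 0xffffc000;			/* zero both threshold fields */
		return rxdctl |
		       RXDCTL_PTHRESH(0x1f) |		/* prefetch threshold = 31 */
		       RXDCTL_HTHRESH(1) |		/* host threshold = 1 */
		       E1000_RXDCTL_THRESH_UNIT_DESC;	/* count in descriptors */
	}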
+/**
+ * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
+ * @adapter: board private structure
+ *
+ * In I219, the descriptor rings must be emptied before resetting the HW
+ * or before changing the device state to D3 during runtime (runtime PM).
+ *
+ * Failure to do this will cause the HW to enter a unit hang state which
+ * can only be released by a PCI reset on the device.
+ */
+static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
+{
+ u16 hang_state;
+ u32 fext_nvm11, tdlen;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* First, disable MULR fix in FEXTNVM11 */
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ /* do nothing if we're not in a faulty state, or if the queue is empty */
+ tdlen = er32(TDLEN(0));
+ pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+ &hang_state);
+ if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
+ return;
+ e1000_flush_tx_ring(adapter);
+ /* recheck, maybe the fault is caused by the rx ring */
+ pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+ &hang_state);
+ if (hang_state & FLUSH_DESC_REQUIRED)
+ e1000_flush_rx_ring(adapter);
+}
+
+/**
* e1000e_reset - bring the hardware into a known good state
*
* This function boots the hardware and enables some settings that
@@ -3807,7 +3917,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* reset Packet Buffer Allocation to default */
ew32(PBA, pba);
- if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) {
/* To maintain wire speed transmits, the Tx FIFO should be
* large enough to accommodate two full transmit packets,
* rounded up to the next 1KB and expressed in KB. Likewise,
@@ -3943,6 +4053,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
}
+ if (hw->mac.type == e1000_pch_spt)
+ e1000_flush_desc_rings(adapter);
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
@@ -4016,6 +4128,20 @@ void e1000e_reset(struct e1000_adapter *adapter)
phy_data &= ~IGP02E1000_PM_SPD;
e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
}
+ if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) {
+ u32 reg;
+
+ /* Fextnvm7 @ 0xe4[2] = 1 */
+ reg = er32(FEXTNVM7);
+ reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
+ ew32(FEXTNVM7, reg);
+ /* Fextnvm9 @ 0x5bb4[12:11] = 11 */
+ reg = er32(FEXTNVM9);
+ reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
+ E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
+ ew32(FEXTNVM9, reg);
+ }
+
}
int e1000e_up(struct e1000_adapter *adapter)
@@ -4115,8 +4241,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
spin_unlock(&adapter->stats64_lock);
e1000e_flush_descriptors(adapter);
- e1000_clean_tx_ring(adapter->tx_ring);
- e1000_clean_rx_ring(adapter->rx_ring);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -4127,8 +4251,14 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
e1000_lv_jumbo_workaround_ich8lan(hw, false))
e_dbg("failed to disable jumbo frame workaround mode\n");
- if (reset && !pci_channel_offline(adapter->pdev))
- e1000e_reset(adapter);
+ if (!pci_channel_offline(adapter->pdev)) {
+ if (reset)
+ e1000e_reset(adapter);
+ else if (hw->mac.type == e1000_pch_spt)
+ e1000_flush_desc_rings(adapter);
+ }
+ e1000_clean_tx_ring(adapter->tx_ring);
+ e1000_clean_rx_ring(adapter->rx_ring);
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4151,9 +4281,16 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
cc);
struct e1000_hw *hw = &adapter->hw;
cycle_t systim, systim_next;
+ /* SYSTIMH latching upon SYSTIML read does not work well. To fix that
+ * we don't want to allow overflow of SYSTIML and a change to SYSTIMH
+ * to occur between reads, so if we read a value close to overflow, we
+ * wait for overflow to occur and read both registers when it's safe.
+ */
+ u32 systim_overflow_latch_fix = 0x3FFFFFFF;
- /* latch SYSTIMH on read of SYSTIML */
- systim = (cycle_t)er32(SYSTIML);
+ do {
+ systim = (cycle_t)er32(SYSTIML);
+ } while (systim > systim_overflow_latch_fix);
systim |= (cycle_t)er32(SYSTIMH) << 32;
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
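
The guard generalizes to any split 64-bit counter whose high half latches unreliably on a low-half read: proceed only while the low half is in its first quarter, otherwise spin until it wraps so no carry into the high half can occur between the two reads. A generic sketch (read_lo/read_hi are placeholder callbacks, not driver functions):

	#include <stdint.h>

	static uint64_t read_split_counter(uint32_t (*read_lo)(void),
					   uint32_t (*read_hi)(void))
	{
		const uint32_t overflow_guard = 0x3FFFFFFF;
		uint32_t lo;

		/* wait out the wrap window so the high half stays stable */
		do {
			lo = read_lo();
		} while (lo > overflow_guard);

		return ((uint64_t)read_hi() << 32) | lo;
	}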
@@ -4196,9 +4333,9 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+ adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
adapter->rx_ps_bsize0 = 128;
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
adapter->tx_ring_count = E1000_DEFAULT_TXD;
adapter->rx_ring_count = E1000_DEFAULT_RXD;
@@ -5781,17 +5918,17 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
+ int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
- if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
/* Supported frame sizes */
- if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+ if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) ||
(max_frame > adapter->max_hw_frame_size)) {
e_err("Unsupported MTU setting\n");
return -EINVAL;
@@ -5831,10 +5968,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = 4096;
/* adjust allocation if LPE protects us, and we aren't using SBP */
- if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
- (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
- adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
- + ETH_FCS_LEN;
+ if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
+ adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
if (netif_running(netdev))
e1000e_up(adapter);
@@ -6678,6 +6813,19 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
}
}
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
+ if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_RXFCS;
+
+ return features;
+}
+
static int e1000_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -6734,6 +6882,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_poll_controller = e1000_netpoll,
#endif
.ndo_set_features = e1000_set_features,
+ .ndo_fix_features = e1000_fix_features,
};
/**
@@ -7289,7 +7438,7 @@ static int __init e1000_init_module(void)
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index fa6b1036a327..49f205c023bf 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 342bf69efab5..5d46967e0d1f 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index aa1923f7ebdd..6d8c39abee16 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b2005e13fb01..de13aeacae97 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 537d2780b408..55bfe473514d 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 8d7b21dc7e19..25a0ad5102d6 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 85eefc4832ba..b24e5fee17f2 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,8 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 5d47307121ab..ec76c3fa3a04 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -182,6 +182,7 @@ struct i40e_lump_tracking {
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
I40E_FD_STAT_SB,
+ I40E_FD_STAT_ATR_TUNNEL,
I40E_FD_STAT_PF_COUNT
};
#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@ enum i40e_fd_stat_idx {
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
#define I40E_FD_SB_STAT_IDX(pf_id) \
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
struct i40e_fdir_filter {
struct hlist_node fdir_node;
@@ -263,8 +266,6 @@ struct i40e_pf {
struct hlist_head fdir_filter_list;
u16 fdir_pf_active_filters;
- u16 fd_sb_cnt_idx;
- u16 fd_atr_cnt_idx;
unsigned long fd_flush_timestamp;
u32 fd_flush_cnt;
u32 fd_add_err;
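
With the tunnel counter added, each PF owns three consecutive GLQF_PCNT slots, which is why the cached fd_sb_cnt_idx/fd_atr_cnt_idx fields removed above can simply be recomputed on demand. A standalone check of the indexing, using a made-up pf_id of 2:

	#include <assert.h>

	enum i40e_fd_stat_idx {
		I40E_FD_STAT_ATR,
		I40E_FD_STAT_SB,
		I40E_FD_STAT_ATR_TUNNEL,
		I40E_FD_STAT_PF_COUNT
	};

	#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)

	int main(void)
	{
		assert(I40E_FD_STAT_PF_IDX(2) + I40E_FD_STAT_ATR == 6);
		assert(I40E_FD_STAT_PF_IDX(2) + I40E_FD_STAT_SB == 7);
		assert(I40E_FD_STAT_PF_IDX(2) + I40E_FD_STAT_ATR_TUNNEL == 8);
		return 0;
	}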
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4cbaaeb902c4..9a68c65b17ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
/* LPI stats */
@@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data)
return *data;
}
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+ struct i40e_vf *vfs = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+ return true;
+ return false;
+}
+
static void i40e_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
@@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev,
netif_info(pf, drv, netdev, "offline testing starting\n");
set_bit(__I40E_TESTING, &pf->state);
+
+ if (i40e_active_vfs(pf)) {
+ dev_warn(&pf->pdev->dev,
+ "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LOOPBACK] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, &pf->state);
+ goto skip_ol_tests;
+ }
+
/* If the device is online then take it offline */
if (if_running)
/* indicate we're in test mode */
@@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_LOOPBACK] = 0;
}
+skip_ol_tests:
+
netif_info(pf, drv, netdev, "testing finished\n");
}
@@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->pctype = 0;
input->dest_vsi = vsi->id;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
- input->cnt_index = pf->fd_sb_cnt_idx;
+ input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
input->flow_type = fsp->flow_type;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 1803afeef23e..c8b621e0e7cd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
*
* The FC EOF is converted to the value understood by HW for descriptor
* programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first and that already checks for all supported valid eof values.
**/
static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
{
@@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
case FC_EOF_A:
return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
default:
- /* FIXME: still returns 0 */
- pr_err("Unrecognized EOF %x\n", eof);
- return 0;
+ /* A supported valid EOF must already have been checked by
+ * calling i40e_fcoe_eof_is_supported() first,
+ * so this default case should never be hit.
+ */
+ WARN_ON(1);
+ return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5b5bea159bd5..52d7d8b8f1f9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
dcb_cfg = &hw->local_dcbx_config;
- /* See if DCB enabled with PFC TC */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
- !(dcb_cfg->pfc.pfcenable)) {
+ /* Collect Link XOFF stats when PFC is disabled */
+ if (!dcb_cfg->pfc.pfcenable) {
i40e_update_link_xoff_rx(pf);
return;
}
@@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_jabber, &nsd->rx_jabber);
/* FDIR stats */
- i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
pf->stat_offsets_loaded,
&osd->fd_atr_match, &nsd->fd_atr_match);
- i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
pf->stat_offsets_loaded,
&osd->fd_sb_match, &nsd->fd_sb_match);
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+ pf->stat_offsets_loaded,
+ &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
val = rd32(hw, I40E_PRTPM_EEE_STAT);
nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
}
i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
- dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
/* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
}
}
}
@@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!(time_after(jiffies, min_flush_time)) &&
(fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
- dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
disable_atr = true;
}
@@ -5496,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!disable_atr)
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
- dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
}
}
@@ -7680,12 +7690,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- /* Setup a counter for fd_atr per PF */
- pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- /* Setup a counter for fd_sb per PF */
- pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7775,7 +7781,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
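
The recurring change in this file wraps each informational Flow Director message in the same debug-mask test; the guard could live in one helper instead of being repeated at every call site. A sketch (i40e_fd_info is a hypothetical name, not part of the driver):

	/* print FD housekeeping messages only when FD debugging is enabled */
	static void i40e_fd_info(struct i40e_pf *pf, const char *msg)
	{
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "%s\n", msg);
	}

Usage would shrink each site to i40e_fd_info(pf, "ATR re-enabled.").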
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9d95042d5a0f..9a4f2bc70cd2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
- /* set the timestamp */
- tx_buf->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
*/
@@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (add) {
pf->fd_tcp_rule++;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
} else {
@@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
}
}
@@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
!(pf->auto_disable_flags &
I40E_FLAG_FD_SB_ENABLED)) {
- dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_SB_ENABLED;
}
@@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->vsi->seid,
tx_ring->queue_index,
tx_ring->next_to_use, i);
- dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->tx_bi[i].time_stamp, jiffies);
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1688,7 +1681,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1821,7 +1813,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
#endif
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1925,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* i40e_atr - Add a Flow Director ATR filter
* @tx_ring: ring to add programming descriptor to
* @skb: send buffer
- * @flags: send flags
+ * @tx_flags: send tx flags
* @protocol: wire protocol
**/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 flags, __be16 protocol)
+ u32 tx_flags, __be16 protocol)
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1954,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!tx_ring->atr_sample_rate)
return;
- /* snag network header to get L4 type and address */
- hdr.network = skb_network_header(skb);
+ if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+ return;
- /* Currently only IPv4/IPv6 with TCP is supported */
- if (protocol == htons(ETH_P_IP)) {
- if (hdr.ipv4->protocol != IPPROTO_TCP)
- return;
+ if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[0] & 0x0F) << 2;
- } else if (protocol == htons(ETH_P_IPV6)) {
- if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+ /* Currently only IPv4/IPv6 with TCP is supported;
+ * access ihl as a u8 to avoid unaligned access on ia64
+ */
+ if (tx_flags & I40E_TX_FLAGS_IPV4)
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ else if (protocol == htons(ETH_P_IPV6))
+ hlen = sizeof(struct ipv6hdr);
+ else
return;
-
- hlen = sizeof(struct ipv6hdr);
} else {
- return;
+ hdr.network = skb_inner_network_header(skb);
+ hlen = skb_inner_network_header_len(skb);
}
+ /* Currently only IPv4/IPv6 with TCP is supported.
+ * Note: tx_flags gets modified to reflect inner protocols in
+ * the tx_enable_csum function if encap is enabled.
+ */
+ if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+ (hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
+ else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+ (hdr.ipv6->nexthdr != IPPROTO_TCP))
+ return;
+
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
@@ -2022,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dtype_cmd |=
- ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ else
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->rsvd = cpu_to_le32(0);
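
The new branch picks which of the two per-PF ATR counters the filter increments; restated as a ternary over the tunnel flag (same macros as the code above, not a proposed change):

	u32 cnt_idx = (tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL) ?
		      I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) :
		      I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);

	dtype_cmd |= ((u32)cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		     I40E_TXD_FLTR_QW1_CNTINDEX_MASK;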
@@ -2045,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
* otherwise returns 0 to indicate the flags have been set properly.
**/
#ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
#endif
{
__be16 protocol = skb->protocol;
@@ -2119,16 +2130,14 @@ out:
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
* @hdr_len: ptr to the size of the packet header
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol, u8 *hdr_len,
- u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+ u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+ u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
struct ipv6hdr *ipv6h;
@@ -2220,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
/**
* i40e_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset,
struct i40e_ring *tx_ring,
u32 *cd_tunneling)
@@ -2241,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
return;
@@ -2250,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
this_ipv6_hdr = inner_ipv6_hdr(skb);
this_tcp_hdrlen = inner_tcp_hdrlen(skb);
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
ip_hdr(skb)->check = 0;
} else {
*cd_tunneling |=
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
- if (tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
}
@@ -2273,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
if (this_ip_hdr->version == 6) {
- tx_flags &= ~I40E_TX_FLAGS_IPV4;
- tx_flags |= I40E_TX_FLAGS_IPV6;
+ *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
}
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -2284,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
}
/* Enable IP checksum offloads */
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
l4_hdr = this_ip_hdr->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
this_ip_hdr->check = 0;
} else {
@@ -2298,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the td_offset for IP header length */
*td_offset = (network_hdr_len >> 2) <<
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
l4_hdr = this_ipv6_hdr->nexthdr;
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
/* Now set the td_offset for IP header length */
@@ -2396,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* Returns 0 if stop is not needed
**/
#ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2473,13 +2482,13 @@ linearize_chk_done:
* @td_offset: offset for checksum or crc
**/
#ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
unsigned int data_len = skb->data_len;
@@ -2585,9 +2594,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->queue_index),
first->bytecount);
- /* set the timestamp */
- first->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2640,11 +2646,11 @@ dma_error:
* one descriptor.
**/
#ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
#endif
{
unsigned int f;
@@ -2706,7 +2712,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ tso = i40e_tso(tx_ring, skb, &hdr_len,
&cd_type_cmd_tso_mss, &cd_tunneling);
if (tso < 0)
@@ -2732,7 +2738,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
}
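
The switch from a tx_flags value to a pointer threads state forward through the transmit path: the checksum routine can discover that the inner header is IPv6 or that the frame is VXLAN-encapsulated, and i40e_atr() later keys off those same bits. A condensed sketch of the resulting call sequence, assembled from the hunks above:

	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;
		/* may flip IPV4 -> IPV6 and set VXLAN_TUNNEL for encap frames */
		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	/* ATR now sees the inner protocol via the updated flags */
	i40e_atr(tx_ring, skb, tx_flags, protocol);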
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4b0b8102cdc3..0dc48dc9ca61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -146,7 +147,6 @@ enum i40e_dyn_idx_t {
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
- unsigned long time_stamp;
union {
struct sk_buff *skb;
void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 568e855da0f3..9a5a75b1e2bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4e9376da0518..23f95cdbdfcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev);
int err = 0;
+ if (pf->state & __I40E_TESTING) {
+ dev_warn(&pdev->dev,
+ "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+ err = -EPERM;
+ goto err_out;
+ }
+
dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
i40e_free_vfs(pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 458fbb421090..f54996f19629 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->vsi->seid,
tx_ring->queue_index,
tx_ring->next_to_use, i);
- dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->tx_bi[i].time_stamp, jiffies);
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -1128,9 +1124,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1156,7 +1149,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1271,7 +1263,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
: 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1352,7 +1343,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
}
/**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
* @tx_ring: ring to send buffer on
* @flags: the tx flags to be set
@@ -1363,9 +1354,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
* Returns an error code to indicate the frame should be dropped upon error,
* otherwise returns 0 to indicate the flags have been set properly.
**/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
{
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
@@ -1408,16 +1399,14 @@ out:
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
* @hdr_len: ptr to the size of the packet header
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol, u8 *hdr_len,
- u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+ u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+ u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
struct ipv6hdr *ipv6h;
@@ -1468,12 +1457,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
/**
* i40e_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset,
struct i40e_ring *tx_ring,
u32 *cd_tunneling)
@@ -1489,6 +1478,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
return;
@@ -1498,18 +1488,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
this_ipv6_hdr = inner_ipv6_hdr(skb);
this_tcp_hdrlen = inner_tcp_hdrlen(skb);
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
ip_hdr(skb)->check = 0;
} else {
*cd_tunneling |=
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
- if (tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
}
@@ -1521,8 +1510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
if (this_ip_hdr->version == 6) {
- tx_flags &= ~I40E_TX_FLAGS_IPV4;
- tx_flags |= I40E_TX_FLAGS_IPV6;
+ *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
}
@@ -1534,12 +1523,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
}
/* Enable IP checksum offloads */
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
l4_hdr = this_ip_hdr->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
this_ip_hdr->check = 0;
} else {
@@ -1548,7 +1537,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the td_offset for IP header length */
*td_offset = (network_hdr_len >> 2) <<
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
l4_hdr = this_ipv6_hdr->nexthdr;
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
/* Now set the td_offset for IP header length */
@@ -1672,7 +1661,44 @@ linearize_chk_done:
}
/**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size, in descriptors, we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ /* Check again in case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size, in descriptors, we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
* @first: first buffer info buffer to use
@@ -1681,9 +1707,9 @@ linearize_chk_done:
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
@@ -1789,9 +1815,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->queue_index),
first->bytecount);
- /* set the timestamp */
- first->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -1808,8 +1831,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
+ i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index)))
+ writel(i, tx_ring->tail);
return;
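
Two things change around the doorbell in i40evf_tx_map: the ring-space check now runs before the tail write, and the write is skipped while the stack advertises more packets (skb->xmit_more) unless the queue just stopped, so the final tail write of a burst is never deferred indefinitely. The idiom, isolated from the function above:

	tx_ring->next_to_use = i;

	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* batch doorbells: hit the tail register only at the end of a burst,
	 * or when the queue stopped and the HW must drain what is queued
	 */
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						   tx_ring->queue_index)))
		writel(i, tx_ring->tail);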
@@ -1831,44 +1858,7 @@ dma_error:
}
/**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
- /* Memory barrier before checking head and tail */
- smp_mb();
-
- /* Check again in a case another CPU has just made room available. */
- if (likely(I40E_DESC_UNUSED(tx_ring) < size))
- return -EBUSY;
-
- /* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
- ++tx_ring->tx_stats.restart_queue;
- return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
*
@@ -1876,8 +1866,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* there are not enough descriptors available in this ring since we need at least
* one descriptor.
**/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
{
unsigned int f;
int count = 0;
@@ -1892,7 +1882,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
@@ -1918,11 +1908,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso;
- if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+ if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
/* prepare the xmit flags */
- if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
/* obtain protocol of skb */
@@ -1937,7 +1927,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ tso = i40e_tso(tx_ring, skb, &hdr_len,
&cd_type_cmd_tso_mss, &cd_tunneling);
if (tso < 0)
@@ -1958,17 +1948,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
}
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
- i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
- td_cmd, td_offset);
-
- i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 1e49bb1fbac1..e7a34f899f2c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
- unsigned long time_stamp;
union {
struct sk_buff *skb;
void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ec9d83a93379..c463ec41579c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a0a9b1fcb5e8..f287186192bb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1836,31 +1836,19 @@ void igb_reinit_locked(struct igb_adapter *adapter)
*
* @adapter: adapter struct
**/
-static s32 igb_enable_mas(struct igb_adapter *adapter)
+static void igb_enable_mas(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 connsw;
- s32 ret_val = 0;
-
- connsw = rd32(E1000_CONNSW);
- if (!(hw->phy.media_type == e1000_media_type_copper))
- return ret_val;
+ u32 connsw = rd32(E1000_CONNSW);
/* configure for SerDes media detect */
- if (!(connsw & E1000_CONNSW_SERDESD)) {
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_SERDESD))) {
connsw |= E1000_CONNSW_ENRGSRC;
connsw |= E1000_CONNSW_AUTOSENSE_EN;
wr32(E1000_CONNSW, connsw);
wrfl();
- } else if (connsw & E1000_CONNSW_SERDESD) {
- /* already SerDes, no need to enable anything */
- return ret_val;
- } else {
- netdev_info(adapter->netdev,
- "MAS: Unable to configure feature, disabling..\n");
- adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
}
- return ret_val;
}
void igb_reset(struct igb_adapter *adapter)
@@ -1980,10 +1968,9 @@ void igb_reset(struct igb_adapter *adapter)
adapter->ei.get_invariants(hw);
adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
}
- if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
- if (igb_enable_mas(adapter))
- dev_err(&pdev->dev,
- "Error enabling Media Auto Sense\n");
+ if ((mac->type == e1000_82575) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_enable_mas(adapter);
}
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
@@ -4989,6 +4976,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+ unsigned short f;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
@@ -4999,14 +4987,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
- unsigned short f;
-
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
- } else {
- count += skb_shinfo(skb)->nr_frags;
- }
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index eafa9ec802ba..9a1d0f142b09 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2594,18 +2594,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fdir_filter *input;
union ixgbe_atr_input mask;
+ u8 queue;
int err;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
return -EOPNOTSUPP;
- /*
- * Don't allow programming if the action is a queue greater than
- * the number of online Rx queues.
+ /* ring_cookie is masked into a set of queues and ixgbe pools, or
+ * we use the drop index.
*/
- if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
- (fsp->ring_cookie >= adapter->num_rx_queues))
- return -EINVAL;
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (!vf && (ring >= adapter->num_rx_queues))
+ return -EINVAL;
+ else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool))
+ return -EINVAL;
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
/* Don't allow indexes to exist outside of available space */
if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2700,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
/* program filters to filter memory */
err = ixgbe_fdir_write_perfect_filter_82599(hw,
- &input->filter, input->sw_idx,
- (input->action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[input->action]->reg_idx);
+ &input->filter, input->sw_idx, queue);
if (err)
goto err_out_w_lock;
@@ -3053,7 +3067,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 status;
+ s32 status;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -3061,14 +3075,14 @@ static int ixgbe_get_module_info(struct net_device *dev,
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
&sff8472_rev);
- if (status != 0)
+ if (status)
return -EIO;
/* addressing mode is not supported */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_SWAP,
&addr_mode);
- if (status != 0)
+ if (status)
return -EIO;
if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
@@ -3095,7 +3109,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u8 databyte = 0xFF;
int i = 0;
@@ -3112,7 +3126,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
else
status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
- if (status != 0)
+ if (status)
return -EIO;
data[i - ee->offset] = databyte;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5be12a00e1f4..23d82b34314e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4757,7 +4757,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
u32 speed;
bool autoneg, link_up = false;
- u32 ret = IXGBE_ERR_LINK_SETUP;
+ int ret = IXGBE_ERR_LINK_SETUP;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -8022,7 +8022,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
- u32 status;
+ int status;
__u16 mode;
if (nla_type(attr) != IFLA_BRIDGE_MODE)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 8a2be444113d..af828f89419f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -317,14 +317,14 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
**/
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
- u32 status;
+ s32 status;
u16 phy_id_high = 0;
u16 phy_id_low = 0;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
&phy_id_high);
- if (status == 0) {
+ if (!status) {
hw->phy.id = (u32)(phy_id_high << 16);
status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
&phy_id_low);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f5f948d08b43..0a8b5e42e1a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -696,14 +696,14 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
- swsm &= ~IXGBE_SWSM_SMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
swsm &= ~IXGBE_SWFW_REGSMP;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
IXGBE_WRITE_FLUSH(hw);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cf5cf819a6b8..b0236985e915 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -103,6 +103,39 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbe_iosf_wait - Wait for IOSF command completion
+ * @hw: pointer to hardware structure
+ * @ctrl: pointer to location to receive final IOSF control value
+ *
+ * Return: failing status on timeout
+ *
+ * Note: ctrl can be NULL if the IOSF control register value is not needed
+ */
+static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+{
+ u32 i, command;
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete.
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
+ break;
+ usleep_range(10, 20);
+ }
+ if (ctrl)
+ *ctrl = command;
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ hw_dbg(hw, "IOSF wait timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
+}
+
/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from a specified register of the
* IOSF device
* @hw: pointer to hardware structure
@@ -113,7 +146,17 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data)
{
- u32 i, command, error;
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+ if (ret)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret)
+ goto out;
command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
(device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
@@ -121,17 +164,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
/* Write IOSF control register */
IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
- /* Check every 10 usec to see if the address cycle completed.
- * The SB IOSF BUSY bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usleep_range(10, 20);
-
- command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
- if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
- break;
- }
+ ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
@@ -140,14 +173,12 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
return IXGBE_ERR_PHY;
}
- if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
- hw_dbg(hw, "Read timed out\n");
- return IXGBE_ERR_PHY;
- }
-
- *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+ if (!ret)
+ *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
- return 0;
+out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return ret;
}
/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
@@ -789,7 +820,17 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data)
{
- u32 i, command, error;
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+ if (ret)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret)
+ goto out;
command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
(device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
@@ -800,17 +841,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
/* Write IOSF data register */
IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
- /* Check every 10 usec to see if the address cycle completed.
- * The SB IOSF BUSY bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usleep_range(10, 20);
-
- command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
- if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
- break;
- }
+ ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
@@ -819,12 +850,9 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
return IXGBE_ERR_PHY;
}
- if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
- hw_dbg(hw, "Write timed out\n");
- return IXGBE_ERR_PHY;
- }
-
- return 0;
+out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return ret;
}
/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
@@ -1035,7 +1063,7 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
**/
static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
{
- u32 status;
+ s32 status;
u16 lasi, autoneg_status, speed;
ixgbe_link_speed force_speed;
@@ -1177,7 +1205,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
**/
static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
- u32 status;
+ s32 status;
u16 reg;
u32 retries = 2;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1c75829eb166..d52639bc491f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -3125,9 +3125,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mib_counters_clear(mp);
- init_timer(&mp->mib_counters_timer);
- mp->mib_counters_timer.data = (unsigned long)mp;
- mp->mib_counters_timer.function = mib_counters_timer_wrapper;
+ setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
+ (unsigned long)mp);
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
spin_lock_init(&mp->mib_counters_lock);
@@ -3136,9 +3135,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
- init_timer(&mp->rx_oom);
- mp->rx_oom.data = (unsigned long)mp;
- mp->rx_oom.function = oom_timer_wrapper;
+ setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ce5f7f9cff06..ecce8261ce3b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1359,7 +1359,7 @@ static void *mvneta_frag_alloc(const struct mvneta_port *pp)
static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
if (likely(pp->frag_size <= PAGE_SIZE))
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 529ef0594b90..68ae765873a9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -882,7 +882,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
{
struct ib_smp *smp = inbox->buf;
u32 index;
- u8 port;
+ u8 port, slave_port;
u8 opcode_modifier;
u16 *table;
int err;
@@ -894,7 +894,8 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
__be32 slave_cap_mask;
__be64 slave_node_guid;
- port = vhcr->in_modifier;
+ slave_port = vhcr->in_modifier;
+ port = mlx4_slave_convert_port(dev, slave, slave_port);
/* network-view bit is for driver use only, and should not be passed to FW */
opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
@@ -930,8 +931,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
/* get the slave specific caps: */
/* do the command */
+ smp->attr_mod = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
+ port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
/* modify the response for slaves */
if (!err && slave != mlx4_master_func_num(dev)) {
@@ -975,7 +977,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
}
if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
+ port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (!err) {
slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
@@ -2915,7 +2917,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->mac = mac;
- mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+ mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
}
@@ -3197,6 +3199,12 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enabled)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+ &priv->dev, slave);
+ int min_port = find_first_bit(actv_ports.ports,
+ priv->dev.caps.num_ports) + 1;
+ int max_port = min_port - 1 +
+ bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
if (slave == mlx4_master_func_num(dev))
return 0;
@@ -3206,6 +3214,11 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
enabled < 0 || enabled > 1)
return -EINVAL;
+ if (min_port == max_port && dev->caps.num_ports > 1) {
+ mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
+ return -EPROTONOSUPPORT;
+ }
+
priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index e71f31387ac6..3348e646db70 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
u64 mtt_addr;
int err;
- if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+ if (vector >= dev->caps.num_comp_vectors)
return -EINVAL;
cq->vector = vector;
@@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq_context->flags |= cpu_to_be32(1 << 19);
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
- cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
+ cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv =
- &priv->eq_table.eq[cq->vector].tasklet_ctx;
+ &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
- cq->irq = priv->eq_table.eq[cq->vector].irq;
+ cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
return 0;
err_radix:
@@ -368,7 +368,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
- synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+ synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+ if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+ synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 22da4d0d0f05..63769df872a4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->ring = ring;
cq->is_tx = mode;
+ cq->vector = mdev->dev->caps.num_comp_vectors;
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
@@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int err = 0;
char name[25];
int timestamp_en = 0;
- struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
- priv->dev->rx_cpu_rmap;
-#else
- NULL;
-#endif
+ bool assigned_eq = false;
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
memset(cq->buf, 0, cq->buf_size);
if (cq->is_tx == RX) {
- if (mdev->dev->caps.comp_pool) {
- if (!cq->vector) {
- sprintf(name, "%s-%d", priv->dev->name,
- cq->ring);
- /* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(mdev->dev, name, rmap,
- &cq->vector)) {
- cq->vector = (cq->ring + 1 + priv->port)
- % mdev->dev->caps.num_comp_vectors;
- mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
- name);
- }
-
+ if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+ cq->vector)) {
+ cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
+
+ err = mlx4_assign_eq(mdev->dev, priv->port,
+ &cq->vector);
+ if (err) {
+ mlx4_err(mdev, "Failed assigning an EQ to %s\n",
+ name);
+ goto free_eq;
}
- } else {
- cq->vector = (cq->ring + 1 + priv->port) %
- mdev->dev->caps.num_comp_vectors;
+
+ assigned_eq = true;
}
cq->irq_desc =
@@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
cq->vector, 0, timestamp_en);
if (err)
- return err;
+ goto free_eq;
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
@@ -168,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
} else {
- struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
-
- err = irq_set_affinity_hint(cq->mcq.irq,
- ring->affinity_mask);
- if (err)
- mlx4_warn(mdev, "Failed setting affinity hint\n");
-
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
}
@@ -182,6 +167,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
napi_enable(&cq->napi);
return 0;
+
+free_eq:
+ if (assigned_eq)
+ mlx4_release_eq(mdev->dev, cq->vector);
+ cq->vector = mdev->dev->caps.num_comp_vectors;
+ return err;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
@@ -191,9 +182,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+ if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+ cq->is_tx == RX)
mlx4_release_eq(priv->mdev->dev, cq->vector);
- }
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
@@ -207,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (!cq->is_tx) {
napi_hash_del(&cq->napi);
synchronize_rcu();
- irq_set_affinity_hint(cq->mcq.irq, NULL);
}
netif_napi_del(&cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index cf467a9f6cc7..98efb5842fca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1954,7 +1954,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
int i;
#ifdef CONFIG_RFS_ACCEL
- free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
priv->dev->rx_cpu_rmap = NULL;
#endif
@@ -2012,11 +2011,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
}
#ifdef CONFIG_RFS_ACCEL
- if (priv->mdev->dev->caps.comp_pool) {
- priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
- if (!priv->dev->rx_cpu_rmap)
- goto err;
- }
+ priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 2a77a6b19121..35f726c17e48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -337,15 +337,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
struct mlx4_dev *dev = mdev->dev;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- if (!dev->caps.comp_pool)
- num_of_eqs = max_t(int, MIN_RX_RINGS,
- min_t(int,
- dev->caps.num_comp_vectors,
- DEF_RX_RINGS));
- else
- num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
- dev->caps.comp_pool/
- dev->caps.num_ports) - 1;
+ num_of_eqs = max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ mlx4_get_eqs_per_port(mdev->dev, i),
+ DEF_RX_RINGS));
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
min_t(int, num_of_eqs,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2619c9fbf42d..aae13adfb492 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -145,7 +145,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
struct mlx4_eqe *eqe;
u8 slave;
- int i;
+ int i, phys_port, slave_port;
for (eqe = next_slave_event_eqe(slave_eq); eqe;
eqe = next_slave_event_eqe(slave_eq)) {
@@ -154,9 +154,20 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
for (i = 0; i <= dev->persist->num_vfs; i++) {
+ phys_port = 0;
+ if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
+ eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
+ phys_port = eqe->event.port_mgmt_change.port;
+ slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
+ if (slave_port < 0) /* VF doesn't have this port */
+ continue;
+ eqe->event.port_mgmt_change.port = slave_port;
+ }
if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to generate event for slave %d\n",
i);
+ if (phys_port)
+ eqe->event.port_mgmt_change.port = phys_port;
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -210,6 +221,22 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
slave_event(dev, slave, eqe);
}
+#if defined(CONFIG_SMP)
+static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+{
+ int hint_err;
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+
+ if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ return;
+
+ hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ if (hint_err)
+ mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+}
+#endif
+
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
struct mlx4_eqe eqe;
@@ -224,7 +251,7 @@ int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
- eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
@@ -241,7 +268,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
- eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
@@ -251,6 +278,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
u8 port_subtype_change)
{
struct mlx4_eqe eqe;
+ u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);
/* don't send if we don't have that slave */
if (dev->persist->num_vfs < slave)
@@ -259,7 +287,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
eqe.subtype = port_subtype_change;
- eqe.event.port_change.port = cpu_to_be32(port << 28);
+ eqe.event.port_change.port = cpu_to_be32(slave_port << 28);
mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
port_subtype_change, slave, port);
@@ -589,6 +617,10 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
if (i == mlx4_master_func_num(dev))
continue;
+ eqe->event.port_change.port =
+ cpu_to_be32(
+ (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+ | (mlx4_phys_to_slave_port(dev, i, port) << 28));
mlx4_slave_event(dev, i, eqe);
}
}
@@ -879,8 +911,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
* we need to map, take the difference of highest index and
* the lowest index we'll use and add 1.
*/
- return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
- dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+ return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+ dev->caps.reserved_eqs / 4 + 1;
}
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -1069,32 +1101,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
- struct mlx4_priv *priv = mlx4_priv(dev);
- int i, vec;
+ int i;
if (eq_table->have_irq)
free_irq(dev->persist->pdev->irq, dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
+ free_cpumask_var(eq_table->eq[i].affinity_mask);
+#if defined(CONFIG_SMP)
+ irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
+#endif
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
- for (i = 0; i < dev->caps.comp_pool; i++) {
- /*
- * Freeing the assigned irq's
- * all bits should be 0, but we need to validate
- */
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- /* NO need protecting*/
- vec = dev->caps.num_comp_vectors + 1 + i;
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- }
- }
-
-
kfree(eq_table->irq_names);
}
@@ -1175,76 +1196,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
}
priv->eq_table.irq_names =
- kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
- dev->caps.comp_pool),
+ kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
- goto err_out_bitmap;
+ goto err_out_clr_int;
}
- for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
- }
- }
-
- err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
- &priv->eq_table.eq[dev->caps.num_comp_vectors]);
- if (err)
- goto err_out_comp;
-
- /*if additional completion vectors poolsize is 0 this loop will not run*/
- for (i = dev->caps.num_comp_vectors + 1;
- i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+ if (i == MLX4_EQ_ASYNC) {
+ err = mlx4_create_eq(dev,
+ MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+ 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+ } else {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+#ifdef CONFIG_RFS_ACCEL
+ int port = find_first_bit(eq->actv_ports.ports,
+ dev->caps.num_ports) + 1;
+
+ if (port <= dev->caps.num_ports) {
+ struct mlx4_port_info *info =
+ &mlx4_priv(dev)->port[port];
+
+ if (!info->rmap) {
+ info->rmap = alloc_irq_cpu_rmap(
+ mlx4_get_eqs_per_port(dev, port));
+ if (!info->rmap) {
+ mlx4_warn(dev, "Failed to allocate cpu rmap\n");
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+ }
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
+ err = irq_cpu_rmap_add(
+ info->rmap, eq->irq);
+ if (err)
+ mlx4_warn(dev, "Failed adding irq rmap\n");
+ }
+#endif
+ err = mlx4_create_eq(dev, dev->caps.num_cqs -
+ dev->caps.reserved_cqs +
+ MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ?
+ i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
+ eq);
}
+ if (err)
+ goto err_out_unmap;
}
-
if (dev->flags & MLX4_FLAG_MSI_X) {
const char *eq_name;
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
- if (i < dev->caps.num_comp_vectors) {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-comp-%d@pci:%s", i,
- pci_name(dev->persist->pdev));
- } else {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-async@pci:%s",
- pci_name(dev->persist->pdev));
- }
+ snprintf(priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-async@pci:%s",
+ pci_name(dev->persist->pdev));
+ eq_name = priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;
- eq_name = priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE;
- err = request_irq(priv->eq_table.eq[i].irq,
- mlx4_msi_x_interrupt, 0, eq_name,
- priv->eq_table.eq + i);
- if (err)
- goto err_out_async;
+ err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
+ mlx4_msi_x_interrupt, 0, eq_name,
+ priv->eq_table.eq + MLX4_EQ_ASYNC);
+ if (err)
+ goto err_out_unmap;
- priv->eq_table.eq[i].have_irq = 1;
- }
+ priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
} else {
snprintf(priv->eq_table.irq_names,
MLX4_IRQNAME_SIZE,
@@ -1253,36 +1271,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
- goto err_out_async;
+ goto err_out_unmap;
priv->eq_table.have_irq = 1;
}
err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
- eq_set_ci(&priv->eq_table.eq[i], 1);
+ /* arm ASYNC eq */
+ eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
return 0;
-err_out_async:
- mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-
-err_out_comp:
- i = dev->caps.num_comp_vectors - 1;
-
err_out_unmap:
- while (i >= 0) {
- mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- --i;
+ while (i >= 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
}
+#endif
+ mlx4_free_irqs(dev);
+
+err_out_clr_int:
if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
- mlx4_free_irqs(dev);
err_out_bitmap:
mlx4_unmap_uar(dev);
@@ -1300,11 +1320,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
int i;
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
+ }
+#endif
mlx4_free_irqs(dev);
- for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
if (!mlx4_is_slave(dev))
@@ -1355,87 +1383,169 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
/* Return to default */
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
- int *vector)
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
+ (vector == MLX4_EQ_ASYNC))
+ return false;
+
+ return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
+}
+EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
+{
struct mlx4_priv *priv = mlx4_priv(dev);
- int vec = 0, err = 0, i;
+ unsigned int i;
+ unsigned int sum = 0;
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
+ sum += !!test_bit(port - 1,
+ priv->eq_table.eq[i].actv_ports.ports);
+
+ return sum;
+}
+EXPORT_SYMBOL(mlx4_get_eqs_per_port);
+
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
+ return -EINVAL;
+
+ return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
+ dev->caps.num_ports) > 1);
+}
+EXPORT_SYMBOL(mlx4_is_eq_shared);
+
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
+{
+ return mlx4_priv(dev)->port[port].rmap;
+}
+EXPORT_SYMBOL(mlx4_get_cpu_rmap);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = 0, i = 0;
+ u32 min_ref_count_val = (u32)-1;
+ int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
+ int *prequested_vector = NULL;
+
mutex_lock(&priv->msix_ctl.pool_lock);
- for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
- if (~priv->msix_ctl.pool_bm & 1ULL << i) {
- priv->msix_ctl.pool_bm |= 1ULL << i;
- vec = dev->caps.num_comp_vectors + 1 + i;
- snprintf(priv->eq_table.irq_names +
- vec * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE, "%s", name);
-#ifdef CONFIG_RFS_ACCEL
- if (rmap) {
- err = irq_cpu_rmap_add(rmap,
- priv->eq_table.eq[vec].irq);
- if (err)
- mlx4_warn(dev, "Failed adding irq rmap\n");
+ if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
+ (requested_vector >= 0) &&
+ (requested_vector != MLX4_EQ_ASYNC)) {
+ if (test_bit(port - 1,
+ priv->eq_table.eq[requested_vector].actv_ports.ports)) {
+ prequested_vector = &requested_vector;
+ } else {
+ struct mlx4_eq *eq;
+
+ for (i = 1; i < port;
+ requested_vector += mlx4_get_eqs_per_port(dev, i++))
+ ;
+
+ eq = &priv->eq_table.eq[requested_vector];
+ if (requested_vector < dev->caps.num_comp_vectors + 1 &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ prequested_vector = &requested_vector;
}
-#endif
- err = request_irq(priv->eq_table.eq[vec].irq,
- mlx4_msi_x_interrupt, 0,
- &priv->eq_table.irq_names[vec<<5],
- priv->eq_table.eq + vec);
- if (err) {
- /*zero out bit by fliping it*/
- priv->msix_ctl.pool_bm ^= 1 << i;
- vec = 0;
- continue;
- /*we dont want to break here*/
+ }
+ }
+
+ if (!prequested_vector) {
+ requested_vector = -1;
+ for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
+ i++) {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+
+ if (min_ref_count_val > eq->ref_count &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ min_ref_count_val = eq->ref_count;
+ requested_vector = i;
}
+ }
+
+ if (requested_vector < 0) {
+ err = -ENOSPC;
+ goto err_unlock;
+ }
+
+ prequested_vector = &requested_vector;
+ }
+
+ if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
+ dev->flags & MLX4_FLAG_MSI_X) {
+ set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ snprintf(priv->eq_table.irq_names +
+ *prequested_vector * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
+ *prequested_vector, dev_name(&dev->persist->pdev->dev));
- eq_set_ci(&priv->eq_table.eq[vec], 1);
+ err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
+ mlx4_msi_x_interrupt, 0,
+ &priv->eq_table.irq_names[*prequested_vector << 5],
+ priv->eq_table.eq + *prequested_vector);
+
+ if (err) {
+ clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ *prequested_vector = -1;
+ } else {
+#if defined(CONFIG_SMP)
+ mlx4_set_eq_affinity_hint(priv, *prequested_vector);
+#endif
+ eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
+ priv->eq_table.eq[*prequested_vector].have_irq = 1;
}
}
+
+ if (!err && *prequested_vector >= 0)
+ priv->eq_table.eq[*prequested_vector].ref_count++;
+
+err_unlock:
mutex_unlock(&priv->msix_ctl.pool_lock);
- if (vec) {
- *vector = vec;
- } else {
+ if (!err && *prequested_vector >= 0)
+ *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
+ else
*vector = 0;
- err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
- }
+
return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);
-int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- return priv->eq_table.eq[vec].irq;
+ return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- /*bm index*/
- int i = vec - dev->caps.num_comp_vectors - 1;
-
- if (likely(i >= 0)) {
- /*sanity check , making sure were not trying to free irq's
- Belonging to a legacy EQ*/
- mutex_lock(&priv->msix_ctl.pool_lock);
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- priv->msix_ctl.pool_bm &= ~(1ULL << i);
- }
- mutex_unlock(&priv->msix_ctl.pool_lock);
- }
+ int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);
+ mutex_lock(&priv->msix_ctl.pool_lock);
+ priv->eq_table.eq[eq_vec].ref_count--;
+
+ /* once we have allocated an EQ, we don't release it because it might
+ * be bound to a cpu_rmap.
+ */
+ mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ced5ecab5aa7..7d57777e65c5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2364,11 +2364,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -2481,14 +2481,45 @@ err_uar_table_free:
return err;
}
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+ int requested_cpu = 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_eq *eq;
+ int off = 0;
+ int i;
+
+ if (eqn > dev->caps.num_comp_vectors)
+ return -EINVAL;
+
+ for (i = 1; i < port; i++)
+ off += mlx4_get_eqs_per_port(dev, i);
+
+ requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
+
+ /* Meaning EQs are shared, and this call comes from the second port */
+ if (requested_cpu < 0)
+ return 0;
+
+ eq = &priv->eq_table.eq[eqn];
+
+ if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+ return 0;
+}
+
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int i;
+ int port = 0;
if (msi_x) {
- int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+ int nreq = dev->caps.num_ports * num_online_cpus() + 1;
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
@@ -2503,20 +2534,55 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
nreq);
- if (nreq < 0) {
+ if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
kfree(entries);
goto no_msi;
- } else if (nreq < MSIX_LEGACY_SZ +
- dev->caps.num_ports * MIN_MSIX_P_PORT) {
- /*Working in legacy mode , all EQ's shared*/
- dev->caps.comp_pool = 0;
- dev->caps.num_comp_vectors = nreq - 1;
- } else {
- dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
- dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
}
- for (i = 0; i < nreq; ++i)
- priv->eq_table.eq[i].irq = entries[i].vector;
+ /* 1 is reserved for events (asynchronous EQ) */
+ dev->caps.num_comp_vectors = nreq - 1;
+
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+ bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+ dev->caps.num_ports);
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+ if (i == MLX4_EQ_ASYNC)
+ continue;
+
+ priv->eq_table.eq[i].irq =
+ entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+ if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ /* We don't set affinity hint when there
+ * aren't enough EQs
+ */
+ } else {
+ set_bit(port,
+ priv->eq_table.eq[i].actv_ports.ports);
+ if (mlx4_init_affinity_hint(dev, port + 1, i))
+ mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+ i);
+ }
+ /* We divide the EQs evenly between the two ports.
+ * (dev->caps.num_comp_vectors / dev->caps.num_ports)
+ * refers to the number of EQs per port
+ * (i.e. eqs_per_port). Theoretically, we would like to
+ * write something like (i + 1) % eqs_per_port == 0.
+ * However, since there's an asynchronous EQ, we have
+ * to skip over it by comparing this condition to
+ * !!((i + 1) > MLX4_EQ_ASYNC).
+ */
+ if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
+ ((i + 1) %
+ (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
+ !!((i + 1) > MLX4_EQ_ASYNC))
+ /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
+ * everything is shared anyway.
+ */
+ port++;
+ }
dev->flags |= MLX4_FLAG_MSI_X;
@@ -2526,10 +2592,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
no_msi:
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
- for (i = 0; i < 2; ++i)
+ BUG_ON(MLX4_EQ_ASYNC >= 2);
+ for (i = 0; i < 2; ++i) {
priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
+ if (i != MLX4_EQ_ASYNC) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ }
+ }
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2594,6 +2665,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(info->rmap);
+ info->rmap = NULL;
+#endif
}
static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2749,6 +2824,7 @@ disable_sriov:
free_mem:
dev->persist->num_vfs = 0;
kfree(dev->dev_vfs);
+ dev->dev_vfs = NULL;
return dev_flags & ~MLX4_FLAG_MASTER;
}
@@ -2900,6 +2976,7 @@ slave_start:
existing_vfs,
reset_flow);
+ mlx4_close_fw(dev);
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
dev->flags = dev_flags;
if (!SRIOV_VALID_STATE(dev->flags)) {
@@ -2988,18 +3065,6 @@ slave_start:
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
- int ib_ports = 0;
-
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
- ib_ports++;
-
- if (ib_ports &&
- (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
- mlx4_err(dev,
- "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
- err = -EINVAL;
- goto err_close;
- }
if (dev->caps.num_ports < 2 &&
num_vfs_argc > 1) {
err = -EINVAL;
@@ -3036,7 +3101,7 @@ slave_start:
if (err)
goto err_master_mfunc;
- priv->msix_ctl.pool_bm = 0;
+ bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
mutex_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
@@ -3058,7 +3123,6 @@ slave_start:
!mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 502d3dd2c888..f424900d23a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -287,6 +287,12 @@ struct mlx4_icm_table {
#define MLX4_CQE_SIZE_MASK_STRIDE 0x3
#define MLX4_EQE_SIZE_MASK_STRIDE 0x30
+#define MLX4_EQ_ASYNC 0
+#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
+#define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
+
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@@ -391,6 +397,9 @@ struct mlx4_eq {
struct mlx4_buf_list *page_list;
struct mlx4_mtt mtt;
struct mlx4_eq_tasklet tasklet_ctx;
+ struct mlx4_active_ports actv_ports;
+ u32 ref_count;
+ cpumask_var_t affinity_mask;
};
struct mlx4_slave_eqe {
@@ -808,6 +817,7 @@ struct mlx4_port_info {
struct mlx4_vlan_table vlan_table;
struct mlx4_roce_gid_table gid_table;
int base_qpn;
+ struct cpu_rmap *rmap;
};
struct mlx4_sense {
@@ -818,7 +828,7 @@ struct mlx4_sense {
};
struct mlx4_msix_ctl {
- u64 pool_bm;
+ DECLARE_BITMAP(pool_bm, MAX_MSIX);
struct mutex pool_lock;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d021f079f181..edd8fd69ec9a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -338,7 +338,7 @@ struct mlx4_en_cq {
struct napi_struct napi;
int size;
int buf_size;
- unsigned vector;
+ int vector;
enum cq_type is_tx;
u16 moder_time;
u16 moder_cnt;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index bafe2180cf0c..ab48386bfefc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2703,6 +2703,10 @@ static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
context->qkey = cpu_to_be32(qkey);
}
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox);
+
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -2725,6 +2729,10 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct res_srq *srq;
int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+ if (err)
+ return err;
+
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
if (err)
return err;
@@ -3526,8 +3534,8 @@ static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
((port & 1) << 6);
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
- mlx4_is_eth(dev, port + 1)) {
+ if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
+ qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
qpc->pri_path.sched_queue = pri_sched_queue;
}
@@ -3965,6 +3973,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
return 0;
}
+static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header)
+{
+ if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+ is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+ struct mlx4_net_trans_rule_hw_eth *eth =
+ (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+ struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+ bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+ next_rule->rsvd == 0;
+
+ if (last_rule)
+ ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+ }
+}
+
/*
* In case of missing eth header, append eth header with a MAC address
* assigned to the VF.
@@ -4117,6 +4141,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
+ if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+ handle_eth_header_mcast_prio(ctrl, rule_header);
+
+ if (slave == dev->caps.function)
+ goto execute;
+
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
@@ -4143,6 +4173,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
goto err_put;
}
+execute:
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
vhcr->in_modifier, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 8ff57e8e3e91..158c88c69ef9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -3,6 +3,18 @@
#
config MLX5_CORE
- tristate
+ tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
depends on PCI
default n
+ ---help---
+ Core driver for low level functionality of the ConnectX-4 and
+ Connect-IB cards by Mellanox Technologies.
+
+config MLX5_CORE_EN
+ bool "Mellanox Technologies ConnectX-4 Ethernet support"
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ default n
+ ---help---
+ Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+ Ethernet and InfiniBand support in ConnectX-4 are currently mutually
+ exclusive.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 105780bb980b..26a68b8af2c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,4 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o
+ mad.o transobj.o vport.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \
+ en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+ en_txrx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index ac0f7bf4be95..0715b497511f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -42,95 +42,36 @@
#include "mlx5_core.h"
/* Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0. If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+ * register it in a memory region at HCA virtual address 0.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
dma_addr_t t;
buf->size = size;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
- } else {
- int i;
-
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
- buf->page_shift = PAGE_SHIFT;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
- if (!buf->page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; i++) {
- buf->page_list[i].buf =
- dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
- &t, GFP_KERNEL);
- if (!buf->page_list[i].buf)
- goto err_free;
-
- buf->page_list[i].map = t;
- }
-
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; i++)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
- }
- }
+ buf->npages = 1;
+ buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
+ buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
- return 0;
+ buf->direct.map = t;
-err_free:
- mlx5_buf_free(dev, buf);
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
- return -ENOMEM;
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
- int i;
-
- if (buf->nbufs == 1)
- dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
- buf->direct.map);
- else {
- if (BITS_PER_LONG == 64)
- vunmap(buf->direct.buf);
-
- for (i = 0; i < buf->nbufs; i++)
- if (buf->page_list[i].buf)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- buf->page_list[i].buf,
- buf->page_list[i].map);
- kfree(buf->page_list);
- }
+ dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
+ buf->direct.map);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
@@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
int i;
for (i = 0; i < buf->npages; i++) {
- if (buf->nbufs == 1)
- addr = buf->direct.map + (i << buf->page_shift);
- else
- addr = buf->page_list[i].map;
+ addr = buf->direct.map + (i << buf->page_shift);
pas[i] = cpu_to_be64(addr);
}
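The simplified mlx5_buf_alloc() above derives npages and page_shift purely from the alignment of the single contiguous DMA allocation. A stand-alone sketch of that computation (hypothetical names, user-space only for illustration):

#include <stdint.h>
#include <stdio.h>

/* Mirror the loop in mlx5_buf_alloc(): start from the allocation order,
 * then halve the page size and double the page count until the DMA
 * address is aligned to (1 << page_shift).
 */
static void buf_page_split(uint64_t dma_addr, int alloc_order, int base_shift)
{
	int page_shift = alloc_order + base_shift;
	int npages = 1;

	while (dma_addr & ((1ULL << page_shift) - 1)) {
		--page_shift;
		npages *= 2;
	}
	printf("page_shift=%d npages=%d\n", page_shift, npages);
}

int main(void)
{
	buf_page_split(0x3000, 2, 12);	/* prints page_shift=12 npages=4 */
	return 0;
}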
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e3273faf4568..75ff58dc1ff5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -75,25 +75,6 @@ enum {
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
-enum {
- MLX5_CMD_STAT_OK = 0x0,
- MLX5_CMD_STAT_INT_ERR = 0x1,
- MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
- MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
- MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
- MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
- MLX5_CMD_STAT_RES_BUSY = 0x6,
- MLX5_CMD_STAT_LIM_ERR = 0x8,
- MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
- MLX5_CMD_STAT_IX_ERR = 0xa,
- MLX5_CMD_STAT_NO_RES_ERR = 0xf,
- MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
- MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
- MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
- MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
- MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
-};
-
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out,
@@ -390,8 +371,17 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ARM_RQ:
return "ARM_RQ";
- case MLX5_CMD_OP_RESIZE_SRQ:
- return "RESIZE_SRQ";
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return "CREATE_XRC_SRQ";
+
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ return "DESTROY_XRC_SRQ";
+
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ return "QUERY_XRC_SRQ";
+
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ return "ARM_XRC_SRQ";
case MLX5_CMD_OP_ALLOC_PD:
return "ALLOC_PD";
@@ -408,8 +398,8 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ATTACH_TO_MCG:
return "ATTACH_TO_MCG";
- case MLX5_CMD_OP_DETACH_FROM_MCG:
- return "DETACH_FROM_MCG";
+ case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ return "DETTACH_FROM_MCG";
case MLX5_CMD_OP_ALLOC_XRCD:
return "ALLOC_XRCD";
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index eb0cf81f5f45..04ab7e445eae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq,
+ u16 cq_period,
+ u16 cq_max_count)
+{
+ struct mlx5_modify_cq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.cqn = cpu_to_be32(cq->cqn);
+ in.ctx.cq_period = cpu_to_be16(cq_period);
+ in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+ in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+ MLX5_CQ_MODIFY_COUNT);
+
+ return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
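The new helper wraps mlx5_core_modify_cq() for the common moderation case. A fragment showing the calling pattern (this is how the mlx5e ethtool code later in this patch uses it; error handling elided):

/* Fire the CQ event after rx_cq_moderation_usec microseconds or
 * rx_cq_moderation_pkts completions, whichever comes first.
 */
err = mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
				     priv->params.rx_cq_moderation_usec,
				     priv->params.rx_cq_moderation_pkts);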
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
new file mode 100644
index 000000000000..e9edb7210de1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/vport.h>
+#include "wq.h"
+#include "transobj.h"
+#include "mlx5_core.h"
+
+#define MLX5E_MAX_NUM_TC 8
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+#define MLX5E_PARAMS_MIN_MTU 46
+
+#define MLX5E_TX_CQ_POLL_BUDGET 128
+#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+
+static const char vport_strings[][ETH_GSTRING_LEN] = {
+ /* vport statistics */
+ "rx_packets",
+ "rx_bytes",
+ "tx_packets",
+ "tx_bytes",
+ "rx_error_packets",
+ "rx_error_bytes",
+ "tx_error_packets",
+ "tx_error_bytes",
+ "rx_unicast_packets",
+ "rx_unicast_bytes",
+ "tx_unicast_packets",
+ "tx_unicast_bytes",
+ "rx_multicast_packets",
+ "rx_multicast_bytes",
+ "tx_multicast_packets",
+ "tx_multicast_bytes",
+ "rx_broadcast_packets",
+ "rx_broadcast_bytes",
+ "tx_broadcast_packets",
+ "tx_broadcast_bytes",
+
+ /* SW counters */
+ "tso_packets",
+ "tso_bytes",
+ "lro_packets",
+ "lro_bytes",
+ "rx_csum_good",
+ "rx_csum_none",
+ "tx_csum_offload",
+ "tx_queue_stopped",
+ "tx_queue_wake",
+ "tx_queue_dropped",
+ "rx_wqe_err",
+};
+
+struct mlx5e_vport_stats {
+ /* HW counters */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_error_packets;
+ u64 rx_error_bytes;
+ u64 tx_error_packets;
+ u64 tx_error_bytes;
+ u64 rx_unicast_packets;
+ u64 rx_unicast_bytes;
+ u64 tx_unicast_packets;
+ u64 tx_unicast_bytes;
+ u64 rx_multicast_packets;
+ u64 rx_multicast_bytes;
+ u64 tx_multicast_packets;
+ u64 tx_multicast_bytes;
+ u64 rx_broadcast_packets;
+ u64 rx_broadcast_bytes;
+ u64 tx_broadcast_packets;
+ u64 tx_broadcast_bytes;
+
+ /* SW counters */
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 rx_csum_good;
+ u64 rx_csum_none;
+ u64 tx_csum_offload;
+ u64 tx_queue_stopped;
+ u64 tx_queue_wake;
+ u64 tx_queue_dropped;
+ u64 rx_wqe_err;
+
+#define NUM_VPORT_COUNTERS 31
+};
+
+static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "csum_none",
+ "lro_packets",
+ "lro_bytes",
+ "wqe_err"
+};
+
+struct mlx5e_rq_stats {
+ u64 packets;
+ u64 csum_none;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 wqe_err;
+#define NUM_RQ_STATS 5
+};
+
+static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "tso_packets",
+ "tso_bytes",
+ "csum_offload_none",
+ "stopped",
+ "wake",
+ "dropped",
+ "nop"
+};
+
+struct mlx5e_sq_stats {
+ u64 packets;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 csum_offload_none;
+ u64 stopped;
+ u64 wake;
+ u64 dropped;
+ u64 nop;
+#define NUM_SQ_STATS 8
+};
+
+struct mlx5e_stats {
+ struct mlx5e_vport_stats vport;
+};
+
+struct mlx5e_params {
+ u8 log_sq_size;
+ u8 log_rq_size;
+ u16 num_channels;
+ u8 default_vlan_prio;
+ u8 num_tc;
+ u16 rx_cq_moderation_usec;
+ u16 rx_cq_moderation_pkts;
+ u16 tx_cq_moderation_usec;
+ u16 tx_cq_moderation_pkts;
+ u16 min_rx_wqes;
+ u16 rx_hash_log_tbl_sz;
+ bool lro_en;
+ u32 lro_wqe_sz;
+};
+
+enum {
+ MLX5E_RQ_STATE_POST_WQES_ENABLE,
+};
+
+enum cq_flags {
+ MLX5E_CQ_HAS_CQES = 1,
+};
+
+struct mlx5e_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+ void *sqrq;
+ unsigned long flags;
+
+ /* data path - accessed per napi poll */
+ struct napi_struct *napi;
+ struct mlx5_core_cq mcq;
+ struct mlx5e_channel *channel;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_rq {
+ /* data path */
+ struct mlx5_wq_ll wq;
+ u32 wqe_sz;
+ struct sk_buff **skb;
+
+ struct device *pdev;
+ struct net_device *netdev;
+ struct mlx5e_rq_stats stats;
+ struct mlx5e_cq cq;
+
+ unsigned long state;
+ int ix;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+ u32 rqn;
+ struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_tx_skb_cb {
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+};
+
+#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
+
+struct mlx5e_sq_dma {
+ dma_addr_t addr;
+ u32 size;
+};
+
+enum {
+ MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+};
+
+struct mlx5e_sq {
+ /* data path */
+
+ /* dirtied @completion */
+ u16 cc;
+ u32 dma_fifo_cc;
+
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ u32 dma_fifo_pc;
+ u32 bf_offset;
+ struct mlx5e_sq_stats stats;
+
+ struct mlx5e_cq cq;
+
+ /* pointers to per packet info: write@xmit, read@completion */
+ struct sk_buff **skb;
+ struct mlx5e_sq_dma *dma_fifo;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 dma_fifo_mask;
+ void __iomem *uar_map;
+ struct netdev_queue *txq;
+ u32 sqn;
+ u32 bf_buf_size;
+ struct device *pdev;
+ __be32 mkey_be;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_uar uar;
+ struct mlx5e_channel *channel;
+ int tc;
+} ____cacheline_aligned_in_smp;
+
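+/* Free WQEBBs in the cyclic SQ: the ring holds sz_m1 + 1 entries, so
+ * (cc - pc) & sz_m1 is the free count in every case except cc == pc,
+ * which means the ring is completely empty.
+ */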
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
+ return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+ (sq->cc == sq->pc));
+}
+
+enum channel_flags {
+ MLX5E_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct mlx5e_channel {
+ /* data path */
+ struct mlx5e_rq rq;
+ struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
+ struct napi_struct napi;
+ struct device *pdev;
+ struct net_device *netdev;
+ __be32 mkey_be;
+ u8 num_tc;
+ unsigned long flags;
+
+ /* control */
+ struct mlx5e_priv *priv;
+ int ix;
+ int cpu;
+};
+
+enum mlx5e_traffic_types {
+ MLX5E_TT_IPV4_TCP = 0,
+ MLX5E_TT_IPV6_TCP = 1,
+ MLX5E_TT_IPV4_UDP = 2,
+ MLX5E_TT_IPV6_UDP = 3,
+ MLX5E_TT_IPV4 = 4,
+ MLX5E_TT_IPV6 = 5,
+ MLX5E_TT_ANY = 6,
+ MLX5E_NUM_TT = 7,
+};
+
+enum {
+ MLX5E_RQT_SPREADING = 0,
+ MLX5E_RQT_DEFAULT_RQ = 1,
+ MLX5E_NUM_RQT = 2,
+};
+
+struct mlx5e_eth_addr_info {
+ u8 addr[ETH_ALEN + 2];
+ u32 tt_vec;
+ u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+};
+
+#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct mlx5e_eth_addr_db {
+ struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct mlx5e_eth_addr_info broadcast;
+ struct mlx5e_eth_addr_info allmulti;
+ struct mlx5e_eth_addr_info promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
+};
+
+enum {
+ MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+ MLX5E_STATE_OPENED,
+};
+
+struct mlx5e_vlan_db {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 active_vlans_ft_ix[VLAN_N_VID];
+ u32 untagged_rule_ft_ix;
+ u32 any_vlan_rule_ft_ix;
+ bool filter_disabled;
+};
+
+struct mlx5e_flow_table {
+ void *vlan;
+ void *main;
+};
+
+struct mlx5e_priv {
+ /* priv data path fields - start */
+ int order_base_2_num_channels;
+ int queue_mapping_channel_mask;
+ int num_tc;
+ int default_vlan_prio;
+ /* priv data path fields - end */
+
+ unsigned long state;
+ struct mutex state_lock; /* Protects Interface state */
+ struct mlx5_uar cq_uar;
+ u32 pdn;
+ struct mlx5_core_mr mr;
+
+ struct mlx5e_channel **channel;
+ u32 tisn[MLX5E_MAX_NUM_TC];
+ u32 rqtn;
+ u32 tirn[MLX5E_NUM_TT];
+
+ struct mlx5e_flow_table ft;
+ struct mlx5e_eth_addr_db eth_addr;
+ struct mlx5e_vlan_db vlan;
+
+ struct mlx5e_params params;
+ spinlock_t async_events_spinlock; /* sync hw events */
+ struct work_struct update_carrier_work;
+ struct work_struct set_rx_mode_work;
+ struct delayed_work update_stats_work;
+
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_stats stats;
+};
+
+#define MLX5E_NET_IP_ALIGN 2
+
+struct mlx5e_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+struct mlx5e_rx_wqe {
+ struct mlx5_wqe_srq_next_seg next;
+ struct mlx5_wqe_data_seg data;
+};
+
+enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+ MLX5E_10GBASE_CX4 = 2,
+ MLX5E_10GBASE_KX4 = 3,
+ MLX5E_10GBASE_KR = 4,
+ MLX5E_20GBASE_KR2 = 5,
+ MLX5E_40GBASE_CR4 = 6,
+ MLX5E_40GBASE_KR4 = 7,
+ MLX5E_56GBASE_R4 = 8,
+ MLX5E_10GBASE_CR = 12,
+ MLX5E_10GBASE_SR = 13,
+ MLX5E_10GBASE_ER = 14,
+ MLX5E_40GBASE_SR4 = 15,
+ MLX5E_40GBASE_LR4 = 16,
+ MLX5E_100GBASE_CR4 = 20,
+ MLX5E_100GBASE_SR4 = 21,
+ MLX5E_100GBASE_KR4 = 22,
+ MLX5E_100GBASE_LR4 = 23,
+ MLX5E_100BASE_TX = 24,
+ MLX5E_100BASE_T = 25,
+ MLX5E_10GBASE_T = 26,
+ MLX5E_25GBASE_CR = 27,
+ MLX5E_25GBASE_KR = 28,
+ MLX5E_25GBASE_SR = 29,
+ MLX5E_50GBASE_CR2 = 30,
+ MLX5E_50GBASE_KR2 = 31,
+ MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev);
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+
+void mlx5e_update_stats(struct mlx5e_priv *priv);
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv);
+void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_open_locked(struct net_device *netdev);
+int mlx5e_close_locked(struct net_device *netdev);
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params);
+
+static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
+ struct mlx5e_tx_wqe *wqe)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)&wqe->ctrl,
+ sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
+ NULL);
+
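+ /* alternate between the two BlueFlame buffers in the UAR page */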
+ sq->bf_offset ^= sq->bf_buf_size;
+}
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+ struct mlx5_core_cq *mcq;
+
+ mcq = &cq->mcq;
+ mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
new file mode 100644
index 000000000000..388938482ff9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+static void mlx5e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+ strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static const struct {
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_1000BASE_KX] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_10GBASE_CX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_20GBASE_KR2] = {
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ [MLX5E_40GBASE_CR4] = {
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_KR4] = {
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_56GBASE_R4] = {
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ [MLX5E_10GBASE_CR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_SR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_ER] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_40GBASE_SR4] = {
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_LR4] = {
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_100GBASE_CR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_SR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_KR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_LR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100BASE_TX] = {
+ .speed = 100,
+ },
+ [MLX5E_100BASE_T] = {
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ [MLX5E_10GBASE_T] = {
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ [MLX5E_25GBASE_CR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_KR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_SR] = {
+ .speed = 25000,
+ },
+ [MLX5E_50GBASE_CR2] = {
+ .speed = 50000,
+ },
+ [MLX5E_50GBASE_KR2] = {
+ .speed = 50000,
+ },
+};
+
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_VPORT_COUNTERS +
+ priv->params.num_channels * NUM_RQ_STATS +
+ priv->params.num_channels * priv->num_tc *
+ NUM_SQ_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlx5e_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ int i, j, tc, idx = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ break;
+
+ case ETH_SS_TEST:
+ break;
+
+ case ETH_SS_STATS:
+ /* VPORT counters */
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_strings[i]);
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ "rx%d_%s", i, rq_stats_strings[j]);
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data +
+ (idx++) * ETH_GSTRING_LEN,
+ "tx%d_%d_%s", i, tc,
+ sq_stats_strings[j]);
+ break;
+ }
+}
+
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i, j, tc, idx = 0;
+
+ if (!data)
+ return;
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_stats(priv);
+ mutex_unlock(&priv->state_lock);
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ data[idx++] = ((u64 *)&priv->stats.vport)[i];
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->rq.stats)[j];
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+}
+
+static void mlx5e_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+ param->rx_pending = 1 << priv->params.log_rq_size;
+ param->tx_pending = 1 << priv->params.log_sq_size;
+}
+
+static int mlx5e_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_params new_params;
+ u16 min_rx_wqes;
+ u8 log_rq_size;
+ u8 log_sq_size;
+ int err = 0;
+
+ if (param->rx_jumbo_pending) {
+ netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_mini_pending) {
+ netdev_info(dev, "%s: rx_mini_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+
+ log_rq_size = order_base_2(param->rx_pending);
+ log_sq_size = order_base_2(param->tx_pending);
+ min_rx_wqes = min_t(u16, param->rx_pending - 1,
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+ if (log_rq_size == priv->params.log_rq_size &&
+ log_sq_size == priv->params.log_sq_size &&
+ min_rx_wqes == priv->params.min_rx_wqes)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.log_rq_size = log_rq_size;
+ new_params.log_sq_size = log_sq_size;
+ new_params.min_rx_wqes = min_rx_wqes;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static void mlx5e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+
+ ch->max_combined = ncv;
+ ch->combined_count = priv->params.num_channels;
+}
+
+static int mlx5e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+ unsigned int count = ch->combined_count;
+ struct mlx5e_params new_params;
+ int err = 0;
+
+ if (!count) {
+ netdev_info(dev, "%s: combined_count=0 not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (ch->rx_count || ch->tx_count) {
+ netdev_info(dev, "%s: separate rx/tx count not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (count > ncv) {
+ netdev_info(dev, "%s: count (%d) > max (%d)\n",
+ __func__, count, ncv);
+ return -EINVAL;
+ }
+
+ if (priv->params.num_channels == count)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.num_channels = count;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
+ coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
+ coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
+ coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+
+ return 0;
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channel *c;
+ int tc;
+ int i;
+
+ priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+ priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+
+ for (i = 0; i < priv->params.num_channels; ++i) {
+ c = priv->channel[i];
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ mlx5_core_modify_cq_moderation(mdev,
+ &c->sq[tc].cq.mcq,
+ coal->tx_coalesce_usecs,
+ coal->tx_max_coalesced_frames);
+ }
+
+ mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ coal->rx_coalesce_usecs,
+ coal->rx_max_coalesced_frames);
+ }
+
+ return 0;
+}
+
+static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 supported_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ supported_modes |= ptys2ethtool_table[i].supported;
+ }
+ return supported_modes;
+}
+
+static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 advertising_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ advertising_modes |= ptys2ethtool_table[i].advertised;
+ }
+ return advertising_modes;
+}
+
+static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+{
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return SUPPORTED_FIBRE;
+ }
+
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+ return SUPPORTED_Backplane;
+ }
+ return 0;
+}
+
+static void get_speed_duplex(struct net_device *netdev,
+ u32 eth_proto_oper,
+ struct ethtool_cmd *cmd)
+{
+ int i;
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+
+ if (!netif_carrier_ok(netdev))
+ goto out;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
+ speed = ptys2ethtool_table[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static void get_supported(u32 eth_proto_cap, u32 *supported)
+{
+ *supported |= ptys2ethtool_supported_port(eth_proto_cap);
+ *supported |= ptys2ethtool_supported_link(eth_proto_cap);
+ *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
+ u8 rx_pause, u32 *advertising)
+{
+ *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
+ *advertising |= tx_pause ? ADVERTISED_Pause : 0;
+ *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+}
+
+static u8 get_connector_port(u32 eth_proto)
+{
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return PORT_FIBRE;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
+ return PORT_DA;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
+ return PORT_NONE;
+ }
+
+ return PORT_OTHER;
+}
+
+static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+{
+ *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+}
+
+static int mlx5e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_lp;
+ u32 eth_proto_oper;
+ int err;
+
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+
+ if (err) {
+ netdev_err(netdev, "%s: query port ptys failed: %d\n",
+ __func__, err);
+ goto err_query_ptys;
+ }
+
+ eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+
+ cmd->supported = 0;
+ cmd->advertising = 0;
+
+ get_supported(eth_proto_cap, &cmd->supported);
+ get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
+ get_speed_duplex(netdev, eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+ cmd->port = get_connector_port(eth_proto_oper);
+ get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+
+ cmd->transceiver = XCVR_INTERNAL;
+
+err_query_ptys:
+ return err;
+}
+
+static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+{
+ u32 i, ptys_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].advertised & link_modes)
+ ptys_modes |= MLX5E_PROT_MASK(i);
+ }
+
+ return ptys_modes;
+}
+
+static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
+{
+ u32 i, speed_links = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].speed == speed)
+ speed_links |= MLX5E_PROT_MASK(i);
+ }
+
+ return speed_links;
+}
+
+static int mlx5e_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 link_modes;
+ u32 speed;
+ u32 eth_proto_cap, eth_proto_admin;
+ u8 port_status;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ link_modes = cmd->autoneg == AUTONEG_ENABLE ?
+ mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+ mlx5e_ethtool2ptys_speed_link(speed);
+
+ err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ link_modes = link_modes & eth_proto_cap;
+ if (!link_modes) {
+ netdev_err(netdev, "%s: Not supported link mode(s) requested",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ if (link_modes == eth_proto_admin)
+ goto out;
+
+ err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = mlx5_query_port_status(mdev, &port_status);
+ if (err)
+ goto out;
+
+ if (port_status == MLX5_PORT_DOWN)
+ return 0;
+
+ err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+ if (err)
+ goto out;
+ err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
+out:
+ return err;
+}
+
+const struct ethtool_ops mlx5e_ethtool_ops = {
+ .get_drvinfo = mlx5e_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_get_strings,
+ .get_sset_count = mlx5e_get_sset_count,
+ .get_ethtool_stats = mlx5e_get_ethtool_stats,
+ .get_ringparam = mlx5e_get_ringparam,
+ .set_ringparam = mlx5e_set_ringparam,
+ .get_channels = mlx5e_get_channels,
+ .set_channels = mlx5e_set_channels,
+ .get_coalesce = mlx5e_get_coalesce,
+ .set_coalesce = mlx5e_set_coalesce,
+ .get_settings = mlx5e_get_settings,
+ .set_settings = mlx5e_set_settings,
+};
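With the string formats above, ethtool -S output is ordered as the 31 vport counters, then "rx<ch>_<stat>" for each channel, then "tx<ch>_<tc>_<stat>" for each channel/TC pair -- e.g. rx0_packets, rx0_csum_none, ..., tx0_0_packets, ..., tx0_0_nop (illustrative names built from the sprintf formats in mlx5e_get_strings()).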
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
new file mode 100644
index 000000000000..120db80c47aa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+enum {
+ MLX5E_FULLMATCH = 0,
+ MLX5E_ALLMULTI = 1,
+ MLX5E_PROMISC = 2,
+};
+
+enum {
+ MLX5E_UC = 0,
+ MLX5E_MC_IPV4 = 1,
+ MLX5E_MC_IPV6 = 2,
+ MLX5E_MC_OTHER = 3,
+};
+
+enum {
+ MLX5E_ACTION_NONE = 0,
+ MLX5E_ACTION_ADD = 1,
+ MLX5E_ACTION_DEL = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+ struct hlist_node hlist;
+ u8 action;
+ struct mlx5e_eth_addr_info ai;
+};
+
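+/* Hash on the low byte of the MAC address; the table has
+ * MLX5E_ETH_ADDR_HASH_SIZE (256) buckets, one per possible value.
+ */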
+static inline int mlx5e_hash_eth_addr(u8 *addr)
+{
+ return addr[5];
+}
+
+static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ int ix = mlx5e_hash_eth_addr(addr);
+ int found = 0;
+
+ hlist_for_each_entry(hn, &hash[ix], hlist)
+ if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+ found = 1;
+ break;
+ }
+
+ if (found) {
+ hn->action = MLX5E_ACTION_NONE;
+ return;
+ }
+
+ hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+ if (!hn)
+ return;
+
+ ether_addr_copy(hn->ai.addr, addr);
+ hn->action = MLX5E_ACTION_ADD;
+
+ hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+ hlist_del(&hn->hlist);
+ kfree(hn);
+}
+
+static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai)
+{
+ void *ft = priv->ft.main;
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+}
+
+static int mlx5e_get_eth_addr_type(u8 *addr)
+{
+ if (is_unicast_ether_addr(addr))
+ return MLX5E_UC;
+
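+ /* 01:00:5e:00:00:00 - 01:00:5e:7f:ff:ff: IPv4 multicast MAC range */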
+ if ((addr[0] == 0x01) &&
+ (addr[1] == 0x00) &&
+ (addr[2] == 0x5e) &&
+ !(addr[3] & 0x80))
+ return MLX5E_MC_IPV4;
+
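+ /* 33:33:xx:xx:xx:xx: IPv6 multicast MAC range */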
+ if ((addr[0] == 0x33) &&
+ (addr[1] == 0x33))
+ return MLX5E_MC_IPV6;
+
+ return MLX5E_MC_OTHER;
+}
+
+static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+ int eth_addr_type;
+ u32 ret;
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+ switch (eth_addr_type) {
+ case MLX5E_UC:
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV4:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV6:
+ ret =
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV6) |
+ 0;
+ break;
+
+ case MLX5E_MC_OTHER:
+ ret =
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ break;
+
+ case MLX5E_ALLMULTI:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ default: /* MLX5E_PROMISC */
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ return ret;
+}
+
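+/* Install one flow table entry per traffic type selected by
+ * mlx5e_get_tt_vec(), steering matches to the corresponding TIR; on
+ * failure, roll back the entries added so far.
+ */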
+static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type,
+ void *flow_context, void *match_criteria)
+{
+ u8 match_criteria_enable = 0;
+ void *match_value;
+ void *dest;
+ u8 *dmac;
+ u8 *match_criteria_dmac;
+ void *ft = priv->ft.main;
+ u32 *tirn = priv->tirn;
+ u32 tt_vec;
+ int err;
+
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dmac_47_16);
+ match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ memset(match_criteria_dmac, 0xff, ETH_ALEN);
+ ether_addr_copy(dmac, ai->addr);
+ break;
+
+ case MLX5E_ALLMULTI:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ match_criteria_dmac[0] = 0x01;
+ dmac[0] = 0x01;
+ break;
+
+ case MLX5E_PROMISC:
+ break;
+ }
+
+ tt_vec = mlx5e_get_tt_vec(ai, type);
+
+ if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_ANY]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_ANY]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ }
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ethertype);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_UDP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_TCP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ }
+
+ return 0;
+}
+
+static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type)
+{
+ u32 *flow_context;
+ u32 *match_criteria;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_eth_addr_rule_out;
+ }
+
+ err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
+ match_criteria);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_eth_addr_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+enum mlx5e_vlan_rule_type {
+ MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+ MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ u8 match_criteria_enable = 0;
+ u32 *flow_context;
+ void *match_value;
+ void *dest;
+ u32 *match_criteria;
+ u32 *ft_ix;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_vlan_rule_out;
+ }
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ mlx5_get_flow_table_id(priv->ft.main));
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.vlan_tag);
+
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ ft_ix = &priv->vlan.untagged_rule_ft_ix;
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ break;
+ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+ ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
+ vid);
+ break;
+ }
+
+ err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
+ match_criteria, flow_context, ft_ix);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_vlan_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.untagged_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.any_vlan_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.active_vlans_ft_ix[vid]);
+ break;
+ }
+}
+
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = false;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (!priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = true;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ set_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mutex_lock(&priv->state_lock);
+
+ clear_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+ int err;
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+ if (err)
+ return err;
+ }
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ return err;
+
+ if (priv->vlan.filter_disabled) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+
+ if (priv->vlan.filter_disabled)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+ for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
+static void mlx5e_execute_action(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_hash_node *hn)
+{
+ switch (hn->action) {
+ case MLX5E_ACTION_ADD:
+ mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ hn->action = MLX5E_ACTION_NONE;
+ break;
+
+ case MLX5E_ACTION_DEL:
+ mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+ mlx5e_del_eth_addr_from_hash(hn);
+ break;
+ }
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
+ priv->netdev->dev_addr);
+
+ netdev_for_each_uc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+
+ netdev_for_each_mc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ mlx5e_execute_action(priv, hn);
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ mlx5e_execute_action(priv, hn);
+}
+
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ hn->action = MLX5E_ACTION_DEL;
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ hn->action = MLX5E_ACTION_DEL;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_sync_netdev_addr(priv);
+
+ mlx5e_apply_netdev_addr(priv);
+}
+
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct net_device *ndev = priv->netdev;
+
+ bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool broadcast_enabled = rx_mode_enable;
+
+ bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
+ bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
+ bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
+ bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
+ bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
+ bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
+
+ if (enable_promisc)
+ mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (enable_allmulti)
+ mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ if (enable_broadcast)
+ mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+ mlx5e_handle_netdev_addr(priv);
+
+ if (disable_broadcast)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+ if (disable_allmulti)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+ if (disable_promisc)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+
+ ea->promisc_enabled = promisc_enabled;
+ ea->allmulti_enabled = allmulti_enabled;
+ ea->broadcast_enabled = broadcast_enabled;
+}
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_set_rx_mode_core(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+{
+ ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+}
+
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+ u8 *dmac;
+
+ g = kcalloc(9, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
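+ /* Nine flow groups, ordered most- to least-specific: g[0..2] match any
+  * dmac (ethertype + ip_protocol, ethertype only, then a promisc
+  * catch-all); g[3..5] repeat that with a full unicast dmac match and
+  * g[6..8] with only the multicast bit of the dmac.
+  */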
+ g[0].log_sz = 2;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.ethertype);
+
+ g[2].log_sz = 0;
+
+ g[3].log_sz = 14;
+ g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[4].log_sz = 13;
+ g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
+ outer_headers.ethertype);
+
+ g[5].log_sz = 11;
+ g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+
+ g[6].log_sz = 2;
+ g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[7].log_sz = 1;
+ g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
+ outer_headers.ethertype);
+
+ g[8].log_sz = 0;
+ g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 9, g);
+ kfree(g);
+
+ return priv->ft.main ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.main);
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+
+ g = kcalloc(2, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
+ g[0].log_sz = 12;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.first_vid);
+
+ /* untagged + any vlan id */
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.vlan_tag);
+
+ priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 2, g);
+
+ kfree(g);
+ return priv->ft.vlan ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.vlan);
+}
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_main_flow_table(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_create_vlan_flow_table(priv);
+ if (err)
+ goto err_destroy_main_flow_table;
+
+ return 0;
+
+err_destroy_main_flow_table:
+ mlx5e_destroy_main_flow_table(priv);
+
+ return err;
+}
+
+void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_destroy_main_flow_table(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
new file mode 100644
index 000000000000..7348c5173aa9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -0,0 +1,1899 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+struct mlx5e_rq_param {
+ u32 rqc[MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_sq_param {
+ u32 sqc[MLX5_ST_SZ_DW(sqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_cq_param {
+ u32 cqc[MLX5_ST_SZ_DW(cqc)];
+ struct mlx5_wq_param wq;
+ u16 eq_ix;
+};
+
+struct mlx5e_channel_param {
+ struct mlx5e_rq_param rq;
+ struct mlx5e_sq_param sq;
+ struct mlx5e_cq_param rx_cq;
+ struct mlx5e_cq_param tx_cq;
+};
+
+static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 port_state;
+
+ port_state = mlx5_query_vport_state(mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+
+ if (port_state == VPORT_STATE_UP)
+ netif_carrier_on(priv->netdev);
+ else
+ netif_carrier_off(priv->netdev);
+}
+
+static void mlx5e_update_carrier_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ update_carrier_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_carrier(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_vport_stats *s = &priv->stats.vport;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u64 tx_offload_none;
+ int i, j;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+ /* Collect the SW counters first, then the HW counters, for consistency */
+ s->tso_packets = 0;
+ s->tso_bytes = 0;
+ s->tx_queue_stopped = 0;
+ s->tx_queue_wake = 0;
+ s->tx_queue_dropped = 0;
+ tx_offload_none = 0;
+ s->lro_packets = 0;
+ s->lro_bytes = 0;
+ s->rx_csum_none = 0;
+ s->rx_wqe_err = 0;
+ for (i = 0; i < priv->params.num_channels; i++) {
+ rq_stats = &priv->channel[i]->rq.stats;
+
+ s->lro_packets += rq_stats->lro_packets;
+ s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_wqe_err += rq_stats->wqe_err;
+
+ for (j = 0; j < priv->num_tc; j++) {
+ sq_stats = &priv->channel[i]->sq[j].stats;
+
+ s->tso_packets += sq_stats->tso_packets;
+ s->tso_bytes += sq_stats->tso_bytes;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ tx_offload_none += sq_stats->csum_offload_none;
+ }
+ }
+
+ /* HW counters */
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_vport_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+ memset(out, 0, outlen);
+
+ if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+ goto free_out;
+
+#define MLX5_GET_CTR(p, x) \
+ MLX5_GET64(query_vport_counter_out, p, x)
+
+ s->rx_error_packets =
+ MLX5_GET_CTR(out, received_errors.packets);
+ s->rx_error_bytes =
+ MLX5_GET_CTR(out, received_errors.octets);
+ s->tx_error_packets =
+ MLX5_GET_CTR(out, transmit_errors.packets);
+ s->tx_error_bytes =
+ MLX5_GET_CTR(out, transmit_errors.octets);
+
+ s->rx_unicast_packets =
+ MLX5_GET_CTR(out, received_eth_unicast.packets);
+ s->rx_unicast_bytes =
+ MLX5_GET_CTR(out, received_eth_unicast.octets);
+ s->tx_unicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
+ s->tx_unicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+
+ s->rx_multicast_packets =
+ MLX5_GET_CTR(out, received_eth_multicast.packets);
+ s->rx_multicast_bytes =
+ MLX5_GET_CTR(out, received_eth_multicast.octets);
+ s->tx_multicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
+ s->tx_multicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+
+ s->rx_broadcast_packets =
+ MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ s->rx_broadcast_bytes =
+ MLX5_GET_CTR(out, received_eth_broadcast.octets);
+ s->tx_broadcast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+ s->tx_broadcast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+ s->rx_packets =
+ s->rx_unicast_packets +
+ s->rx_multicast_packets +
+ s->rx_broadcast_packets;
+ s->rx_bytes =
+ s->rx_unicast_bytes +
+ s->rx_multicast_bytes +
+ s->rx_broadcast_bytes;
+ s->tx_packets =
+ s->tx_unicast_packets +
+ s->tx_multicast_packets +
+ s->tx_broadcast_packets;
+ s->tx_bytes =
+ s->tx_unicast_bytes +
+ s->tx_multicast_bytes +
+ s->tx_broadcast_bytes;
+
+ /* Update calculated offload counters */
+ s->tx_csum_offload = s->tx_packets - tx_offload_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+
+free_out:
+ kvfree(out);
+}
+
+static void mlx5e_update_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+ update_stats_work);
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_update_stats(priv);
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(
+ MLX5E_UPDATE_STATS_INTERVAL));
+ }
+ mutex_unlock(&priv->state_lock);
+}
+
+static void __mlx5e_async_event(struct mlx5e_priv *priv,
+ enum mlx5_dev_event event)
+{
+ switch (event) {
+ case MLX5_DEV_EVENT_PORT_UP:
+ case MLX5_DEV_EVENT_PORT_DOWN:
+ schedule_work(&priv->update_carrier_work);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ spin_lock(&priv->async_events_spinlock);
+ if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ __mlx5e_async_event(priv, event);
+ spin_unlock(&priv->async_events_spinlock);
+}
+
+static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+}
+
+static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+ spin_lock_irq(&priv->async_events_spinlock);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ spin_unlock_irq(&priv->async_events_spinlock);
+}
+
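+/* Post a NOP WQE with CQ_UPDATE set so a completion fires NAPI; used to
+ * flush pending doorbells and to kick the RX post path on RQ open.
+ */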
+static void mlx5e_send_nop(struct mlx5e_sq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ sq->skb[pi] = NULL;
+ sq->pc++;
+ mlx5e_tx_notify_hw(sq, wqe);
+}
+
+static int mlx5e_create_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int wq_sz;
+ int err;
+ int i;
+
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!rq->skb) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+
+ rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
+ priv->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+
+ wqe->data.lkey = c->mkey_be;
+ wqe->data.byte_count = cpu_to_be32(rq->wqe_sz);
+ }
+
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->channel = c;
+ rq->ix = c->ix;
+
+ return 0;
+
+err_rq_wq_destroy:
+ mlx5_wq_destroy(&rq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+ kfree(rq->skb);
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+ sizeof(u64) * rq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+ MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&rq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_rq(mdev, rq->rqn);
+}
+
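+/* Poll until the RQ holds at least min_rx_wqes posted WQEs, giving up
+ * after roughly 20 seconds (1000 iterations x 20 ms).
+ */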
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_wq_ll *wq = &rq->wq;
+ int i;
+
+ for (i = 0; i < 1000; i++) {
+ if (wq->cur_sz >= priv->params.min_rx_wqes)
+ return 0;
+
+ msleep(20);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mlx5e_open_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ int err;
+
+ err = mlx5e_create_rq(c, param, rq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_rq(rq, param);
+ if (err)
+ goto err_destroy_rq;
+
+ err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ goto err_disable_rq;
+
+ set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */
+
+ return 0;
+
+err_disable_rq:
+ mlx5e_disable_rq(rq);
+err_destroy_rq:
+ mlx5e_destroy_rq(rq);
+
+ return err;
+}
+
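+/* Teardown order: stop posting new WQEs, move the RQ to ERR so pending
+ * WQEs complete, wait for the work queue to drain, then quiesce NAPI
+ * once more before destroying the RQ.
+ */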
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+ clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+
+ mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+ while (!mlx5_wq_ll_is_empty(&rq->wq))
+ msleep(20);
+
+ /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
+ napi_synchronize(&rq->channel->napi);
+
+ mlx5e_disable_rq(rq);
+ mlx5e_destroy_rq(rq);
+}
+
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->dma_fifo);
+ kfree(sq->skb);
+}
+
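+/* The DMA fifo is sized for the worst case of one mapping per data
+ * segment in every WQEBB; both factors are powers of two, so a simple
+ * mask indexes the fifo.
+ */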
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
+ sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
+ numa);
+
+ if (!sq->skb || !sq->dma_fifo) {
+ mlx5e_free_sq_db(sq);
+ return -ENOMEM;
+ }
+
+ sq->dma_fifo_mask = df_sz - 1;
+
+ return 0;
+}
+
+static int mlx5e_create_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *sqc = param->sqc;
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ int err;
+
+ err = mlx5_alloc_map_uar(mdev, &sq->uar);
+ if (err)
+ return err;
+
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+ &sq->wq_ctrl);
+ if (err)
+ goto err_unmap_free_uar;
+
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+ sq->uar_map = sq->uar.map;
+ sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ if (mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)))
+ goto err_sq_wq_destroy;
+
+ sq->txq = netdev_get_tx_queue(priv->netdev,
+ c->ix + tc * priv->params.num_channels);
+
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+
+ return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &sq->uar);
+
+ return err;
+}
+
+static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+
+ mlx5e_free_sq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+}
+
+static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+ MLX5_SET(sqc, sqc, user_index, sq->tc);
+ MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->uar.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
+static int mlx5e_open_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ int err;
+
+ err = mlx5e_create_sq(c, tc, param, sq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_sq(sq, param);
+ if (err)
+ goto err_destroy_sq;
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+ if (err)
+ goto err_disable_sq;
+
+ set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+
+ return 0;
+
+err_disable_sq:
+ mlx5e_disable_sq(sq);
+err_destroy_sq:
+ mlx5e_destroy_sq(sq);
+
+ return err;
+}
+
+static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+{
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+}
+
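+/* Stop the txq, flush any pending WQEs to hardware with a NOP, then
+ * move the SQ to ERR and wait for cc to catch up with pc before
+ * destroying it.
+ */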
+static void mlx5e_close_sq(struct mlx5e_sq *sq)
+{
+ clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
+ netif_tx_disable_queue(sq->txq);
+
+ /* ensure hw is notified of all pending wqes */
+ if (mlx5e_sq_has_room_for(sq, 1))
+ mlx5e_send_nop(sq);
+
+ mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ while (sq->cc != sq->pc) /* wait till sq is empty */
+ msleep(20);
+
+ /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
+ napi_synchronize(&sq->channel->napi);
+
+ mlx5e_disable_sq(sq);
+ mlx5e_destroy_sq(sq);
+}
+
+static int mlx5e_create_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+ u32 i;
+
+ param->wq.numa = cpu_to_node(c->cpu);
+ param->eq_ix = c->ix;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+ cq->napi = &c->napi;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = param->eq_ix;
+ mcq->comp = mlx5e_completion_event;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->channel = c;
+
+ return 0;
+}
+
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+
+ void *in;
+ void *cqc;
+ int inlen;
+ int irqn_not_used;
+ int eqn;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+ mlx5_fill_page_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+
+ kvfree(in);
+
+ if (err)
+ return err;
+
+ mlx5e_cq_arm(cq);
+
+ return 0;
+}
+
+static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+}
+
+static int mlx5e_open_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq,
+ u16 moderation_usecs,
+ u16 moderation_frames)
+{
+ int err;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ err = mlx5e_create_cq(c, param, cq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_cq(cq, param);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+ moderation_usecs,
+ moderation_frames);
+ if (err)
+ goto err_destroy_cq;
+
+ return 0;
+
+err_destroy_cq:
+ mlx5e_destroy_cq(cq);
+
+ return err;
+}
+
+static void mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+ mlx5e_disable_cq(cq);
+ mlx5e_destroy_cq(cq);
+}
+
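+/* Pick the first CPU in the completion vector's IRQ affinity mask; the
+ * channel's memory and XPS mapping are tied to this CPU.
+ */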
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+ return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
+static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ struct mlx5e_priv *priv = c->priv;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
+ priv->params.tx_cq_moderation_usec,
+ priv->params.tx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+
+ c->sq[tc].cq.sqrq = &c->sq[tc];
+ }
+
+ return 0;
+
+err_close_tx_cqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_cq(&c->sq[tc].cq);
+
+ return err;
+}
+
+static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int mlx5e_open_sqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+ if (err)
+ goto err_close_sqs;
+ }
+
+ return 0;
+
+err_close_sqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_sq(&c->sq[tc]);
+
+ return err;
+}
+
+static void mlx5e_close_sqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_sq(&c->sq[tc]);
+}
+
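+/* A channel bundles one RQ, one SQ per TC and their CQs under a single
+ * NAPI context, allocated on the NUMA node of the channel's IRQ CPU.
+ */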
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_channel **cp)
+{
+ struct net_device *netdev = priv->netdev;
+ int cpu = mlx5e_get_cpu(priv, ix);
+ struct mlx5e_channel *c;
+ int err;
+
+ c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ if (!c)
+ return -ENOMEM;
+
+ c->priv = priv;
+ c->ix = ix;
+ c->cpu = cpu;
+ c->pdev = &priv->mdev->pdev->dev;
+ c->netdev = priv->netdev;
+ c->mkey_be = cpu_to_be32(priv->mr.key);
+ c->num_tc = priv->num_tc;
+
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+ err = mlx5e_open_tx_cqs(c, cparam);
+ if (err)
+ goto err_napi_del;
+
+ err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
+ priv->params.rx_cq_moderation_usec,
+ priv->params.rx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+ c->rq.cq.sqrq = &c->rq;
+
+ napi_enable(&c->napi);
+
+ err = mlx5e_open_sqs(c, cparam);
+ if (err)
+ goto err_disable_napi;
+
+ err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+ if (err)
+ goto err_close_sqs;
+
+ netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
+ *cp = c;
+
+ return 0;
+
+err_close_sqs:
+ mlx5e_close_sqs(c);
+
+err_disable_napi:
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+ mlx5e_close_tx_cqs(c);
+
+err_napi_del:
+ netif_napi_del(&c->napi);
+ kfree(c);
+
+ return err;
+}
+
+static void mlx5e_close_channel(struct mlx5e_channel *c)
+{
+ mlx5e_close_rq(&c->rq);
+ mlx5e_close_sqs(c);
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_tx_cqs(c);
+ netif_napi_del(&c->napi);
+ kfree(c);
+}
+
+static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.linear = 1;
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ struct mlx5e_channel_param *cparam)
+{
+ memset(cparam, 0, sizeof(*cparam));
+
+ mlx5e_build_rq_param(priv, &cparam->rq);
+ mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+ mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+}
+
+static int mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channel_param cparam;
+ int err;
+ int i;
+ int j;
+
+ priv->channel = kcalloc(priv->params.num_channels,
+ sizeof(struct mlx5e_channel *), GFP_KERNEL);
+ if (!priv->channel)
+ return -ENOMEM;
+
+ mlx5e_build_channel_param(priv, &cparam);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ if (err)
+ goto err_close_channels;
+ }
+
+ for (j = 0; j < priv->params.num_channels; j++) {
+ err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+ if (err)
+ goto err_close_channels;
+ }
+
+ return 0;
+
+err_close_channels:
+ for (i--; i >= 0; i--)
+ mlx5e_close_channel(priv->channel[i]);
+
+ kfree(priv->channel);
+
+ return err;
+}
+
+static void mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_close_channel(priv->channel[i]);
+
+ kfree(priv->channel);
+}
+
+static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+
+ return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_open_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ err = mlx5e_open_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return 0;
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_tis(priv, tc);
+
+ return err;
+}
+
+static void mlx5e_close_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++)
+ mlx5e_close_tis(priv, tc);
+}
+
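+/* Build the RSS indirection table (RQT): 2^rx_hash_log_tbl_sz entries
+ * striped round-robin across the channels' RQs.
+ */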
+static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ void *rqtc;
+ int inlen;
+ int err;
+ int sz;
+ int i;
+
+ sz = 1 << priv->params.rx_hash_log_tbl_sz;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ for (i = 0; i < sz; i++) {
+ int ix = i % priv->params.num_channels;
+
+ MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+ }
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+ if (!err)
+ priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+
+ mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
+ sizeof(out));
+}
+
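+/* One TIR per traffic type: MLX5E_TT_ANY dispatches directly to channel
+ * 0's RQ, every other type is spread over the RQT via Toeplitz hashing.
+ */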
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+ if (priv->params.lro_en) {
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[3]));
+ }
+
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn,
+ priv->channel[0]->rq.rqn);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key),
+ MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key));
+ break;
+ }
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ }
+}
+
+static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_open_tir(priv, i);
+ if (err)
+ goto err_close_tirs;
+ }
+
+ return 0;
+
+err_close_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_close_tir(priv, i);
+
+ return err;
+}
+
+static void mlx5e_close_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_close_tir(priv, i);
+}
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int actual_mtu;
+ int num_txqs;
+ int err;
+
+ num_txqs = roundup_pow_of_two(priv->params.num_channels) *
+ priv->params.num_tc;
+ netif_set_real_num_tx_queues(netdev, num_txqs);
+ netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+ err = mlx5_set_port_mtu(mdev, netdev->mtu);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n",
+ __func__, err);
+ return err;
+ }
+
+ err = mlx5_query_port_oper_mtu(mdev, &actual_mtu, 1);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n",
+ __func__, err);
+ return err;
+ }
+
+ if (actual_mtu != netdev->mtu)
+ netdev_warn(netdev, "%s: Failed to set MTU to %d\n",
+ __func__, netdev->mtu);
+
+ netdev->mtu = actual_mtu;
+
+ err = mlx5e_open_tises(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
+ __func__, err);
+ return err;
+ }
+
+ err = mlx5e_open_channels(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+ __func__, err);
+ goto err_close_tises;
+ }
+
+ err = mlx5e_open_rqt(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
+ __func__, err);
+ goto err_close_channels;
+ }
+
+ err = mlx5e_open_tirs(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
+ __func__, err);
+ goto err_close_rqt;
+ }
+
+ err = mlx5e_open_flow_table(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
+ __func__, err);
+ goto err_close_tirs;
+ }
+
+ err = mlx5e_add_all_vlan_rules(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
+ __func__, err);
+ goto err_close_flow_table;
+ }
+
+ mlx5e_init_eth_addr(priv);
+
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_update_carrier(priv);
+ mlx5e_set_rx_mode_core(priv);
+
+ schedule_delayed_work(&priv->update_stats_work, 0);
+ return 0;
+
+err_close_flow_table:
+ mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+ mlx5e_close_tirs(priv);
+
+err_close_rqt:
+ mlx5e_close_rqt(priv);
+
+err_close_channels:
+ mlx5e_close_channels(priv);
+
+err_close_tises:
+ mlx5e_close_tises(priv);
+
+ return err;
+}
+
+static int mlx5e_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_open_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_close_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_set_rx_mode_core(priv);
+ mlx5e_del_all_vlan_rules(priv);
+ netif_carrier_off(priv->netdev);
+ mlx5e_close_flow_table(priv);
+ mlx5e_close_tirs(priv);
+ mlx5e_close_rqt(priv);
+ mlx5e_close_channels(priv);
+ mlx5e_close_tises(priv);
+
+ return 0;
+}
+
+static int mlx5e_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_close_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
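+/* Parameters can only change while the device is down, so close, swap
+ * the parameters, then reopen; the caller must hold state_lock.
+ */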
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params)
+{
+ int err = 0;
+ int was_opened;
+
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params = *new_params;
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
+
+ return err;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
+ stats->rx_packets = vstats->rx_packets;
+ stats->rx_bytes = vstats->rx_bytes;
+ stats->tx_packets = vstats->tx_packets;
+ stats->tx_bytes = vstats->tx_bytes;
+ stats->multicast = vstats->rx_multicast_packets +
+ vstats->tx_multicast_packets;
+ stats->tx_errors = vstats->tx_error_packets;
+ stats->rx_errors = vstats->rx_error_packets;
+ stats->tx_dropped = vstats->tx_queue_dropped;
+ stats->rx_crc_errors = 0;
+ stats->rx_length_errors = 0;
+
+ return stats;
+}
+
+static void mlx5e_set_rx_mode(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->set_rx_mode_work);
+}
+
+static int mlx5e_set_mac(struct net_device *netdev, void *addr)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ netif_addr_lock_bh(netdev);
+ ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ netif_addr_unlock_bh(netdev);
+
+ schedule_work(&priv->set_rx_mode_work);
+
+ return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ netdev_features_t changes = features ^ netdev->features;
+ struct mlx5e_params new_params;
+ bool update_params = false;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+
+ if (changes & NETIF_F_LRO) {
+ new_params.lro_en = !!(features & NETIF_F_LRO);
+ update_params = true;
+ }
+
+ if (update_params)
+ mlx5e_update_priv_params(priv, &new_params);
+
+ if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ mlx5e_enable_vlan_filter(priv);
+ else
+ mlx5e_disable_vlan_filter(priv);
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int max_mtu;
+ int err = 0;
+
+ err = mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ if (err)
+ return err;
+
+ if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) {
+ netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n",
+ __func__, MLX5E_PARAMS_MIN_MTU, max_mtu);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->state_lock);
+ netdev->mtu = new_mtu;
+ err = mlx5e_update_priv_params(priv, &priv->params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static struct net_device_ops mlx5e_netdev_ops = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_set_rx_mode = mlx5e_set_rx_mode,
+ .ndo_set_mac_address = mlx5e_set_mac,
+ .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
+ .ndo_set_features = mlx5e_set_features,
+ .ndo_change_mtu = mlx5e_change_mtu,
+};
+
+static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return -ENOTSUPP;
+ if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
+ !MLX5_CAP_GEN(mdev, nic_flow_table) ||
+ !MLX5_CAP_ETH(mdev, csum_cap) ||
+ !MLX5_CAP_ETH(mdev, max_lso_cap) ||
+ !MLX5_CAP_ETH(mdev, vlan_cap) ||
+ !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) {
+ mlx5_core_warn(mdev,
+ "Not creating net device, some required device capabilities are missing\n");
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ int num_comp_vectors)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ priv->params.log_sq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ priv->params.log_rq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.rx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ priv->params.rx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ priv->params.tx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ priv->params.tx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.min_rx_wqes =
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+ priv->params.rx_hash_log_tbl_sz =
+ (order_base_2(num_comp_vectors) >
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
+ order_base_2(num_comp_vectors) :
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
+ priv->params.num_tc = 1;
+ priv->params.default_vlan_prio = 0;
+
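+ /* LRO is force-disabled for now: the leading "false &&" turns the
+  * capability check into a constant false until LRO is enabled.
+  */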
+ priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = num_comp_vectors;
+ priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
+ priv->queue_mapping_channel_mask =
+ roundup_pow_of_two(num_comp_vectors) - 1;
+ priv->num_tc = priv->params.num_tc;
+ priv->default_vlan_prio = priv->params.default_vlan_prio;
+
+ spin_lock_init(&priv->async_events_spinlock);
+ mutex_init(&priv->state_lock);
+
+ INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+ INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
+}
+
+static void mlx5e_build_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+
+ if (priv->num_tc > 1) {
+ mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
+ mlx5e_netdev_ops.ndo_start_xmit = mlx5e_xmit_multi_tc;
+ }
+
+ netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->watchdog_timeo = 15 * HZ;
+
+ netdev->ethtool_ops = &mlx5e_ethtool_ops;
+
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_GRO;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_RXCSUM;
+ netdev->vlan_features |= NETIF_F_RXHASH;
+
+ if (!!MLX5_CAP_ETH(mdev, lro_cap))
+ netdev->vlan_features |= NETIF_F_LRO;
+
+ netdev->hw_features = netdev->vlan_features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features = netdev->hw_features;
+ if (!priv->params.lro_en)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ mlx5e_set_netdev_dev_addr(netdev);
+}
+
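+/* Create a physical-address (PA) mkey spanning all of memory
+ * (MLX5_MKEY_LEN64) so queue buffers need no per-buffer registration.
+ */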
+static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
+ struct mlx5_core_mr *mr)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_create_mkey_mbox_in *in;
+ int err;
+
+ in = mlx5_vzalloc(sizeof(*in));
+ if (!in)
+ return -ENOMEM;
+
+ in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_ACCESS_MODE_PA;
+ in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+ NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int ncv = mdev->priv.eq_table.num_comp_vectors;
+ int err;
+
+ if (mlx5e_check_required_hca_cap(mdev))
+ return NULL;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+ roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC,
+ ncv);
+ if (!netdev) {
+ mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+ return NULL;
+ }
+
+ mlx5e_build_netdev_priv(mdev, netdev, ncv);
+ mlx5e_build_netdev(netdev);
+
+ netif_carrier_off(netdev);
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
+ __func__, err);
+ goto err_free_netdev;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
+ __func__, err);
+ goto err_unmap_free_uar;
+ }
+
+ err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
+ __func__, err);
+ goto err_dealloc_pd;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ netdev_err(netdev, "%s: register_netdev failed, %d\n",
+ __func__, err);
+ goto err_destroy_mkey;
+ }
+
+ mlx5e_enable_async_events(priv);
+
+ return priv;
+
+err_destroy_mkey:
+ mlx5_core_destroy_mkey(mdev, &priv->mr);
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, priv->pdn);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+ return NULL;
+}
+
+static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+
+ unregister_netdev(netdev);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+ mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
+ mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
+ free_netdev(netdev);
+}
+
+static void *mlx5e_get_netdev(void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ return priv->netdev;
+}
+
+static struct mlx5_interface mlx5e_interface = {
+ .add = mlx5e_create_netdev,
+ .remove = mlx5e_destroy_netdev,
+ .event = mlx5e_async_event,
+ .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
+ .get_dev = mlx5e_get_netdev,
+};
+
+void mlx5e_init(void)
+{
+ mlx5_register_interface(&mlx5e_interface);
+}
+
+void mlx5e_cleanup(void)
+{
+ mlx5_unregister_interface(&mlx5e_interface);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
new file mode 100644
index 000000000000..ce1317cdabd7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include "en.h"
+
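+/* Allocate one skb per RX WQE and map it for the device; NET_IP_ALIGN
+ * bytes of headroom keep the IP header aligned, and the DMA handle is
+ * stashed in skb->cb for unmapping at completion time.
+ */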
+static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
+ dma_addr = dma_map_single(rq->pdev,
+ /* hw start padding */
+ skb->data - MLX5E_NET_IP_ALIGN,
+ /* hw end padding */
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
+ goto err_free_skb;
+
+ *((dma_addr_t *)skb->cb) = dma_addr;
+ wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+
+ rq->skb[ix] = skb;
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+ return false;
+
+ while (!mlx5_wq_ll_is_full(wq)) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+ if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+ break;
+
+ mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ }
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ dma_wmb();
+
+ mlx5_wq_ll_update_db_record(wq);
+
+ return !mlx5_wq_ll_is_full(wq);
+}
+
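+/* An LRO-aggregated skb still carries the headers of its first segment;
+ * patch the TCP flags, ack/window, total length, TTL and (for IPv4) the
+ * header checksum so it looks like one large frame.
+ */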
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ struct tcphdr *tcp;
+
+ u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
+ (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+ u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct iphdr));
+ ipv6 = NULL;
+ } else {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct ipv6hdr));
+ ipv4 = NULL;
+ }
+
+ if (get_cqe_lro_tcppsh(cqe))
+ tcp->psh = 1;
+
+ if (tcp_ack) {
+ tcp->ack = 1;
+ tcp->ack_seq = cqe->lro_ack_seq_num;
+ tcp->window = cqe->lro_tcp_win;
+ }
+
+ if (ipv4) {
+ ipv4->ttl = cqe->lro_min_ttl;
+ ipv4->tot_len = cpu_to_be16(tot_len);
+ ipv4->check = 0;
+ ipv4->check = ip_fast_csum((unsigned char *)ipv4,
+ ipv4->ihl);
+ } else {
+ ipv6->hop_limit = cqe->lro_min_ttl;
+ ipv6->payload_len = cpu_to_be16(tot_len -
+ sizeof(struct ipv6hdr));
+ }
+}
+
+static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb)
+{
+ u8 cht = cqe->rss_hash_type;
+ int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
+ (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
+ PKT_HASH_TYPE_NONE;
+ skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
+}
+
+static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rq->netdev;
+ u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ int lro_num_seg;
+
+ skb_put(skb, cqe_bcnt);
+
+ lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+ if (lro_num_seg > 1) {
+ mlx5e_lro_update_hdr(skb, cqe);
+ skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ rq->stats.lro_packets++;
+ rq->stats.lro_bytes += cqe_bcnt;
+ }
+
+ if (likely(netdev->features & NETIF_F_RXCSUM) &&
+ (cqe->hds_ip_ext & CQE_L2_OK) &&
+ (cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ rq->stats.csum_none++;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ skb_record_rx_queue(skb, rq->ix);
+
+ if (likely(netdev->features & NETIF_F_RXHASH))
+ mlx5e_skb_set_hash(cqe, skb);
+
+ if (cqe_has_vlan(cqe))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(cqe->vlan_info));
+}
+
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+ struct mlx5e_rq *rq = cq->sqrq;
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ for (i = 0; i < budget; i++) {
+ struct mlx5e_rx_wqe *wqe;
+ struct mlx5_cqe64 *cqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ skb = rq->skb[wqe_counter];
+ rq->skb[wqe_counter] = NULL;
+
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ skb_end_offset(skb),
+ DMA_FROM_DEVICE);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ dev_kfree_skb(skb);
+ goto wq_ll_pop;
+ }
+
+ mlx5e_build_rx_skb(cqe, rq, skb);
+ rq->stats.packets++;
+ napi_gro_receive(cq->napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (i == budget) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
new file mode 100644
index 000000000000..8020986cdaf6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include "en.h"
+
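+/* Each SQ keeps a small producer/consumer FIFO of the DMA mappings pushed
+ * while building WQEs, so the completion path (and the error unwind on a
+ * mapping failure) can unmap them without re-parsing the descriptors.
+ */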
+static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
+ u32 *size)
+{
+ sq->dma_fifo_pc--;
+ *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ dma_addr_t addr;
+ u32 size;
+ int i;
+
+ for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+ mlx5e_dma_pop_last_pushed(sq, &addr, &size);
+ dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+ }
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
+ u32 size)
+{
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+ sq->dma_fifo_pc++;
+}
+
+static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
+ u32 *size)
+{
+ *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int channel_ix = fallback(dev, skb);
+ int up = skb_vlan_tag_present(skb) ?
+ skb->vlan_tci >> VLAN_PRIO_SHIFT :
+ priv->default_vlan_prio;
+ int tc = netdev_get_prio_tc_map(dev, up);
+
+ return (tc << priv->order_base_2_num_channels) | channel_ix;
+}
+
+static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
+ struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ return MLX5E_MIN_INLINE;
+}
+
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+{
+ struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
+ int cpy1_sz = 2 * ETH_ALEN;
+ int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN;
+
+ skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
+ skb_pull_inline(skb, cpy1_sz);
+ vhdr->h_vlan_proto = skb->vlan_proto;
+ vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+ skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
+ cpy2_sz);
+ skb_pull_inline(skb, cpy2_sz);
+}
+
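+/* Build and post a send WQE: a ctrl segment, an eth segment holding the
+ * inline headers (with the VLAN tag inserted in-line when present), and
+ * one data segment per linear part and page fragment. LSO frames use the
+ * LSO opcode and account the replicated headers in num_bytes.
+ */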
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ u8 opcode = MLX5_OPCODE_SEND;
+ dma_addr_t dma_addr = 0;
+ u16 headlen;
+ u16 ds_cnt;
+ u16 ihs;
+ int i;
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ else
+ sq->stats.csum_offload_none++;
+
+ if (skb_is_gso(skb)) {
+ u32 payload_len;
+ int num_pkts;
+
+ eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ opcode = MLX5_OPCODE_LSO;
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ payload_len = skb->len - ihs;
+ num_pkts = (payload_len / skb_shinfo(skb)->gso_size) +
+ !!(payload_len % skb_shinfo(skb)->gso_size);
+ MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
+ (num_pkts - 1) * ihs;
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += payload_len;
+ } else {
+ ihs = mlx5e_get_inline_hdr_size(sq, skb);
+ MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
+ ETH_ZLEN);
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+ } else {
+ skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+ skb_pull_inline(skb, ihs);
+ }
+
+ eseg->inline_hdr_sz = cpu_to_be16(ihs);
+
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+ MLX5_SEND_WQE_DS);
+ dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+
+ MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+
+ headlen = skb_headlen(skb);
+ if (headlen) {
+ dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(headlen);
+
+ mlx5e_dma_push(sq, dma_addr, headlen);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int fsz = skb_frag_size(frag);
+
+ dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(fsz);
+
+ mlx5e_dma_push(sq, dma_addr, fsz);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ sq->skb[pi] = skb;
+
+ MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
+ MLX5_SEND_WQEBB_NUM_DS);
+ sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+ netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats.stopped++;
+ }
+
+ if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+ mlx5e_tx_notify_hw(sq, wqe);
+
+ sq->stats.packets++;
+ return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+ sq->stats.dropped++;
+ mlx5e_dma_unmap_wqe_err(sq, skb);
+
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ix = skb->queue_mapping;
+ int tc = 0;
+ struct mlx5e_channel *c = priv->channel[ix];
+ struct mlx5e_sq *sq = &c->sq[tc];
+
+ return mlx5e_sq_xmit(sq, skb);
+}
+
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ix = skb->queue_mapping & priv->queue_mapping_channel_mask;
+ int tc = skb->queue_mapping >> priv->order_base_2_num_channels;
+ struct mlx5e_channel *c = priv->channel[ix];
+ struct mlx5e_sq *sq = &c->sq[tc];
+
+ return mlx5e_sq_xmit(sq, skb);
+}
+
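+/* Reclaim TX completions: walk the CQ, unmap the DMA addresses recorded
+ * in the SQ FIFO, free the SKBs and advance the SQ consumer counter, then
+ * report the freed bytes/packets to BQL and wake the queue if it was
+ * stopped and room is available again.
+ */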
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq;
+ u32 dma_fifo_cc;
+ u32 nbytes;
+ u16 npkts;
+ u16 sqcc;
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ sq = cq->sqrq;
+
+ npkts = 0;
+ nbytes = 0;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ /* avoid dirtying sq cache line every cqe */
+ dma_fifo_cc = sq->dma_fifo_cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ struct sk_buff *skb;
+ u16 ci;
+ int j;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ ci = sqcc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+
+ if (unlikely(!skb)) { /* nop */
+ sq->stats.nop++;
+ sqcc++;
+ goto free_skb;
+ }
+
+ for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+ dma_addr_t addr;
+ u32 size;
+
+ mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
+ dma_fifo_cc++;
+ dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+ }
+
+ npkts++;
+ nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
+ sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+free_skb:
+ dev_kfree_skb(skb);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->dma_fifo_cc = dma_fifo_cc;
+ sq->cc = sqcc;
+
+ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+ if (netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) &&
+ likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats.wake++;
+ }
+ if (i == MLX5E_TX_CQ_POLL_BUDGET) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
new file mode 100644
index 000000000000..088bc424157c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
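+/* A CQE belongs to software when its ownership bit matches the parity of
+ * the consumer's wrap count; the read barrier orders the ownership check
+ * before any read of the CQE contents.
+ */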
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+ struct mlx5_cqwq *wq = &cq->wq;
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+ int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
+ if (cqe_ownership_bit != sw_ownership_val)
+ return NULL;
+
+ mlx5_cqwq_pop(wq);
+
+ /* ensure cqe content is read after cqe ownership bit */
+ rmb();
+
+ return cqe;
+}
+
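+/* Per-channel NAPI handler: service the TX CQs of all TCs and the RX CQ,
+ * refill the RX ring, and only re-arm the CQs once no work is left and no
+ * completion event raced with the poll.
+ */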
+int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
+ napi);
+ bool busy = false;
+ int i;
+
+ clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+
+ for (i = 0; i < c->num_tc; i++)
+ busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+
+ busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
+
+ busy |= mlx5e_post_rx_wqes(c->rq.cq.sqrq);
+
+ if (busy)
+ return budget;
+
+ napi_complete(napi);
+
+ /* avoid losing completion event during/after polling cqs */
+ if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
+ napi_schedule(napi);
+ return 0;
+ }
+
+ for (i = 0; i < c->num_tc; i++)
+ mlx5e_cq_arm(&c->sq[i].cq);
+ mlx5e_cq_arm(&c->rq.cq);
+
+ return 0;
+}
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
+ barrier();
+ napi_schedule(cq->napi);
+}
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct net_device *netdev = priv->netdev;
+
+ netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
+ __func__, mcq->cqn, event);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 58800e4f3958..a40b96d4c662 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -339,15 +339,14 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
struct mlx5_create_eq_mbox_in *in;
struct mlx5_create_eq_mbox_out out;
int err;
int inlen;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
- err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
- &eq->buf);
+ err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
@@ -378,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
goto err_in;
}
- snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+ snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
+
eq->eqn = out.eq_number;
eq->irqn = vecidx;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
- eq->name, eq);
+ err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+ priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
@@ -401,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
return 0;
err_irq:
- free_irq(table->msix_arr[vecidx].vector, eq);
+ free_irq(priv->msix_arr[vecidx].vector, eq);
err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -417,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
mlx5_debug_eq_remove(dev, eq);
- free_irq(table->msix_arr[eq->irqn].vector, eq);
+ free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(table->msix_arr[eq->irqn].vector);
+ synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
mlx5_buf_free(dev, &eq->buf);
return err;
@@ -456,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
- if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ if (MLX5_CAP_GEN(dev, pg))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
@@ -479,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
err = mlx5_create_map_eq(dev, &table->pages_eq,
MLX5_EQ_VEC_PAGES,
- dev->caps.gen.max_vf + 1,
+ /* TODO: sriov: restore max_vf + 1 */ 1,
1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
&dev->priv.uuari.uars[0]);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
new file mode 100644
index 000000000000..ca90b9bc3b95
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/flow_table.h>
+#include "mlx5_core.h"
+
+struct mlx5_ftg {
+ struct mlx5_flow_table_group g;
+ u32 id;
+ u32 start_ix;
+};
+
+struct mlx5_flow_table {
+ struct mlx5_core_dev *dev;
+ u8 level;
+ u8 type;
+ u32 id;
+ struct mutex mutex; /* sync bitmap alloc */
+ u16 num_groups;
+ struct mlx5_ftg *group;
+ unsigned long *bitmap;
+ u32 size;
+};
+
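+/* A flow table is a contiguous range of flow indexes partitioned into
+ * groups, each with its own match criteria; a single bitmap tracks which
+ * indexes are in use across the whole table.
+ */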
+static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
+ u32 flow_index, void *flow_context)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ u32 *in;
+ void *in_flow_context;
+ int fcdls =
+ MLX5_GET(flow_context, flow_context, destination_list_size) *
+ MLX5_ST_SZ_BYTES(dest_format_struct);
+ int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, flow_index);
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ memcpy(in_flow_context, flow_context,
+ MLX5_ST_SZ_BYTES(flow_context) + fcdls);
+
+ MLX5_SET(flow_context, in_flow_context, group_id,
+ ft->group[group_ix].id);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
+ MLX5_SET_DFTEI(in, table_type, ft->type);
+ MLX5_SET_DFTEI(in, table_id, ft->id);
+ MLX5_SET_DFTEI(in, flow_index, flow_index);
+ MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
+ MLX5_SET_DFGI(in, table_type, ft->type);
+ MLX5_SET_DFGI(in, table_id, ft->id);
+ MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET_DFGI(in, group_id, ft->group[i].id);
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+ u32 *in;
+ void *in_match_criteria;
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ u32 start_ix = ft->group[i].start_ix;
+ u32 end_ix = start_ix + (1 << g->log_sz) - 1;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+ in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
+ match_criteria);
+
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
+ MLX5_SET_CFGI(in, table_type, ft->type);
+ MLX5_SET_CFGI(in, table_id, ft->id);
+ MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET_CFGI(in, start_flow_index, start_ix);
+ MLX5_SET_CFGI(in, end_flow_index, end_ix);
+ MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
+
+ memcpy(in_match_criteria, g->match_criteria,
+ MLX5_ST_SZ_BYTES(fte_match_param));
+
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ if (!err)
+ ft->group[i].id = MLX5_GET(create_flow_group_out, out,
+ group_id);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++)
+ mlx5_destroy_flow_group_cmd(ft, i);
+}
+
+static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ err = mlx5_create_flow_group_cmd(ft, i);
+ if (err)
+ goto err_destroy_flow_table_groups;
+ }
+
+ return 0;
+
+err_destroy_flow_table_groups:
+ for (i--; i >= 0; i--)
+ mlx5_destroy_flow_group_cmd(ft, i);
+
+ return err;
+}
+
+static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_table_in, in, level, ft->level);
+ MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
+
+ MLX5_SET(create_flow_table_in, in, opcode,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
+ MLX5_SET_DFTI(in, table_type, ft->type);
+ MLX5_SET_DFTI(in, table_id, ft->id);
+ MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
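+/* Locate the group whose enabled match criteria are byte-for-byte
+ * identical to the requested ones; entries can only be added to an
+ * existing group.
+ */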
+static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
+ u32 *match_criteria, int *group_ix)
+{
+ void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+ void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ misc_parameters);
+ void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ inner_headers);
+ int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
+ int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ outer_headers);
+ void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ misc_parameters);
+ void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ inner_headers);
+
+ if (g->match_criteria_enable != match_criteria_enable)
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
+ if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
+ if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
+ if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
+ continue;
+
+ *group_ix = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
+{
+ struct mlx5_ftg *g = &ft->group[group_ix];
+ int err = 0;
+
+ mutex_lock(&ft->mutex);
+
+ *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
+ if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
+ err = -ENOSPC;
+ else
+ __set_bit(*ix, ft->bitmap);
+
+ mutex_unlock(&ft->mutex);
+
+ return err;
+}
+
+static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
+{
+ __clear_bit(ix, ft->bitmap);
+}
+
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+ int group_ix;
+ int err;
+
+ err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
+ &group_ix);
+ if (err) {
+ mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
+ return err;
+ }
+
+ err = alloc_flow_index(ft, group_ix, flow_index);
+ if (err) {
+ mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
+ return err;
+ }
+
+ return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
+}
+EXPORT_SYMBOL(mlx5_add_flow_table_entry);
+
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_del_flow_entry_cmd(ft, flow_index);
+ mlx5_free_flow_index(ft, flow_index);
+}
+EXPORT_SYMBOL(mlx5_del_flow_table_entry);
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group)
+{
+ struct mlx5_flow_table *ft;
+ u32 start_ix = 0;
+ u32 ft_size = 0;
+ void *gr;
+ void *bm;
+ int err;
+ int i;
+
+ for (i = 0; i < num_groups; i++)
+ ft_size += (1 << group[i].log_sz);
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
+ bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(unsigned long), GFP_KERNEL);
+ if (!ft || !gr || !bm)
+ goto err_free_ft;
+
+ ft->group = gr;
+ ft->bitmap = bm;
+ ft->num_groups = num_groups;
+ ft->level = level;
+ ft->type = table_type;
+ ft->size = ft_size;
+ ft->dev = dev;
+ mutex_init(&ft->mutex);
+
+ for (i = 0; i < ft->num_groups; i++) {
+ memcpy(&ft->group[i].g, &group[i], sizeof(*group));
+ ft->group[i].start_ix = start_ix;
+ start_ix += 1 << group[i].log_sz;
+ }
+
+ err = mlx5_create_flow_table_cmd(ft);
+ if (err)
+ goto err_free_ft;
+
+ err = mlx5_create_flow_table_groups(ft);
+ if (err)
+ goto err_destroy_flow_table_cmd;
+
+ return ft;
+
+err_destroy_flow_table_cmd:
+ mlx5_destroy_flow_table_cmd(ft);
+
+err_free_ft:
+ mlx5_core_warn(dev, "failed to create flow table\n");
+ kfree(bm);
+ kfree(gr);
+ kfree(ft);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+void mlx5_destroy_flow_table(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_destroy_flow_table_groups(ft);
+ mlx5_destroy_flow_table_cmd(ft);
+ kfree(ft->bitmap);
+ kfree(ft->group);
+ kfree(ft);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+u32 mlx5_get_flow_table_id(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_get_flow_table_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 4b4cda3bcc5f..9335e5ae18cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,79 +35,133 @@
#include <linux/module.h>
#include "mlx5_core.h"
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
+static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+ int outlen)
{
- struct mlx5_cmd_query_adapter_mbox_out *out;
- struct mlx5_cmd_query_adapter_mbox_in in;
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_board_id(struct mlx5_core_dev *dev)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- memset(&in, 0, sizeof(in));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+ err = mlx5_cmd_query_adapter(dev, out, outlen);
if (err)
- goto out_out;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
- goto out_out;
- }
+ goto out;
- memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
+ memcpy(dev->board_id,
+ MLX5_ADDR_OF(query_adapter_out, out,
+ query_adapter_struct.vsd_contd_psid),
+ MLX5_FLD_SZ_BYTES(query_adapter_out,
+ query_adapter_struct.vsd_contd_psid));
-out_out:
+out:
kfree(out);
-
return err;
}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
- return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_cmd_query_adapter(mdev, out, outlen);
+ if (err)
+ goto out;
+
+ *vendor_id = MLX5_GET(query_adapter_out, out,
+ query_adapter_struct.ieee_vendor_id);
+out:
+ kfree(out);
+ return err;
}
+EXPORT_SYMBOL(mlx5_core_query_vendor_id);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
- u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
- int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
int err;
- if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
- return -ENOTSUPP;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
- memset(in, 0, sizeof(in));
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
- goto out;
+ return err;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err) {
- mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
- goto out;
+ if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
}
- memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
- sizeof(*caps));
+ if (MLX5_CAP_GEN(dev, pg)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
- mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
- be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+ if (MLX5_CAP_GEN(dev, atomic)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
-out:
- kfree(out);
- return err;
+ if (MLX5_CAP_GEN(dev, roce)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+ return 0;
}
-EXPORT_SYMBOL(mlx5_query_odp_caps);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28425e5ea91f..afad529838de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -38,6 +38,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
@@ -47,10 +48,6 @@
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0"
-#define DRIVER_RELDATE "January 2015"
-
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
@@ -208,24 +205,28 @@ static void release_bar(struct pci_dev *pdev)
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- int num_eqs = 1 << dev->caps.gen.log_max_eq;
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_eq_table *table = &priv->eq_table;
+ int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
int i;
- nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+ nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_EQ_VEC_COMP_BASE;
nvec = min_t(int, nvec, num_eqs);
if (nvec <= MLX5_EQ_VEC_COMP_BASE)
return -ENOMEM;
- table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
- if (!table->msix_arr)
- return -ENOMEM;
+ priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
+
+ priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
+ if (!priv->msix_arr || !priv->irq_info)
+ goto err_free_msix;
for (i = 0; i < nvec; i++)
- table->msix_arr[i].entry = i;
+ priv->msix_arr[i].entry = i;
- nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+ nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
MLX5_EQ_VEC_COMP_BASE + 1, nvec);
if (nvec < 0)
return nvec;
@@ -233,14 +234,20 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
+
+err_free_msix:
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
+ return -ENOMEM;
}
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
pci_disable_msix(dev->pdev);
- kfree(table->msix_arr);
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
@@ -277,98 +284,20 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
-/* selectively copy writable fields clearing any reserved area
- */
-static void copy_rw_fields(void *to, struct mlx5_caps *from)
-{
- __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
- u64 v64;
-
- MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
- MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
- v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
- *flags_off = cpu_to_be64(v64);
-}
-
-static u16 get_pkey_table_size(int pkey)
-{
- if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
- return 0;
-
- return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
-}
-
-static void fw2drv_caps(struct mlx5_caps *caps, void *out)
-{
- struct mlx5_general_caps *gen = &caps->gen;
-
- gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
- gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
- gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
- gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
- gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
- gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
- gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
- gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
- gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
- gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
- gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
- gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
- gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
- gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
- gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
- gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
- gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
- gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
- gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
- gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
- gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
- gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
- gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
- gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
- gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
- pr_debug("flags = 0x%llx\n", gen->flags);
- gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
- gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
- gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
- gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
- gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
- gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
- gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
- gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
- gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
- gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
- gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
-}
-
-static const char *caps_opmod_str(u16 opmod)
-{
- switch (opmod) {
- case HCA_CAP_OPMOD_GET_MAX:
- return "GET_MAX";
- case HCA_CAP_OPMOD_GET_CUR:
- return "GET_CUR";
- default:
- return "Invalid";
- }
-}
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
- u16 opmod)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
+ void *out, *hca_caps;
+ u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
+
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
@@ -377,12 +306,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
err = mlx5_cmd_status_to_err_v2(out);
if (err) {
- mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
+ mlx5_core_warn(dev,
+ "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
+ cap_type, cap_mode, err);
goto query_ex;
}
- mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
- fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+ switch (cap_mode) {
+ case HCA_CAP_OPMOD_GET_MAX:
+ memcpy(dev->hca_caps_max[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ case HCA_CAP_OPMOD_GET_CUR:
+ memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ default:
+ mlx5_core_warn(dev,
+ "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+ cap_type, cap_mode);
+ err = -EINVAL;
+ break;
+ }
query_ex:
kfree(out);
return err;
@@ -409,49 +356,45 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
{
void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
- struct mlx5_caps *cur_caps = NULL;
- struct mlx5_caps *max_caps = NULL;
int err = -ENOMEM;
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_hca_cap;
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
goto query_ex;
- max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
- if (!max_caps)
- goto query_ex;
-
- cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
- if (!cur_caps)
- goto query_ex;
-
- err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
goto query_ex;
- err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
if (err)
goto query_ex;
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+ MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+ mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
+ mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+ 128);
/* we limit the size of the pkey table to 128 entries for now */
- cur_caps->gen.pkey_table_size = 128;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+ to_fw_pkey_sz(128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
- cur_caps->gen.log_max_qp = prof->log_max_qp;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+ prof->log_max_qp);
- /* disable checksum */
- cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+ /* disable cmdif checksum */
+ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
- copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
- cur_caps);
err = set_caps(dev, set_ctx, set_sz);
query_ex:
- kfree(cur_caps);
- kfree(max_caps);
kfree(set_ctx);
-
return err;
}
@@ -507,6 +450,74 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
return 0;
}
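+/* Hint each completion vector's IRQ to a CPU chosen by
+ * cpumask_local_spread(), preferring cores on the device's NUMA node so
+ * interrupt load stays local.
+ */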
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+ int numa_node = dev_to_node(&mdev->pdev->dev);
+ int err;
+
+ if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ priv->irq_info[i].mask);
+
+ err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
+ if (err) {
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
+ irq);
+ goto err_clear_mask;
+ }
+
+ return 0;
+
+err_clear_mask:
+ free_cpumask_var(priv->irq_info[i].mask);
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+
+ irq_set_affinity_hint(irq, NULL);
+ free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+ err = mlx5_irq_set_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
@@ -549,7 +560,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev)
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
- char name[MLX5_MAX_EQ_NAME];
+ char name[MLX5_MAX_IRQ_NAME];
struct mlx5_eq *eq;
int ncomp_vec;
int nent;
@@ -566,7 +577,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
goto clean;
}
- snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
name, &dev->priv.uuari.uars[0]);
@@ -588,6 +599,61 @@ clean:
return err;
}
+#ifdef CONFIG_MLX5_CORE_EN
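+/* ISSI (Interface Step Sequence ID) negotiation: query the ISSIs the
+ * device supports and move it to ISSI 1 when available; a BAD_OP status
+ * means the command itself is unknown and only ISSI 0 exists.
+ */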
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
+ u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
+ int err;
+ u32 sup_issi;
+
+ memset(query_in, 0, sizeof(query_in));
+ memset(query_out, 0, sizeof(query_out));
+
+ MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+
+ err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
+ query_out, sizeof(query_out));
+ if (err) {
+ if (((struct mlx5_outbox_hdr *)query_out)->status ==
+ MLX5_CMD_STAT_BAD_OP_ERR) {
+ pr_debug("Only ISSI 0 is supported\n");
+ return 0;
+ }
+
+ pr_err("failed to query ISSI\n");
+ return err;
+ }
+
+ sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
+ if (sup_issi & (1 << 1)) {
+ memset(set_in, 0, sizeof(set_in));
+ memset(set_out, 0, sizeof(set_out));
+
+ MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+ MLX5_SET(set_issi_in, set_in, current_issi, 1);
+
+ err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
+ set_out, sizeof(set_out));
+ if (err) {
+ pr_err("failed to set ISSI=1\n");
+ return err;
+ }
+
+ dev->issi = 1;
+
+ return 0;
+ } else if (sup_issi & (1 << 0) || !sup_issi) {
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+#endif
+
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -650,6 +716,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_pagealloc_cleanup;
}
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_core_set_issi(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to set issi\n");
+ goto err_disable_hca;
+ }
+#endif
+
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
dev_err(&pdev->dev, "failed to allocate boot pages\n");
@@ -688,15 +762,15 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_start_health_poll(dev);
- err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
+ err = mlx5_query_hca_caps(dev);
if (err) {
dev_err(&pdev->dev, "query hca failed\n");
goto err_stop_poll;
}
- err = mlx5_cmd_query_adapter(dev);
+ err = mlx5_query_board_id(dev);
if (err) {
- dev_err(&pdev->dev, "query adapter failed\n");
+ dev_err(&pdev->dev, "query board id failed\n");
goto err_stop_poll;
}
@@ -730,6 +804,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
+ err = mlx5_irq_set_affinity_hints(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+ goto err_free_comp_eqs;
+ }
+
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
mlx5_init_cq_table(dev);
@@ -739,6 +819,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
+err_free_comp_eqs:
+ free_comp_eqs(dev);
+
err_stop_eqs:
mlx5_stop_eqs(dev);
@@ -793,6 +876,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
+ mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
@@ -1048,6 +1132,10 @@ static int __init init(void)
if (err)
goto err_health;
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_init();
+#endif
+
return 0;
err_health:
@@ -1060,6 +1148,9 @@ err_debug:
static void __exit cleanup(void)
{
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_cleanup();
+#endif
pci_unregister_driver(&mlx5_core_driver);
mlx5_health_cleanup();
destroy_workqueue(mlx5_core_wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index d79fd85d1dd5..d5a0c2d61a18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
memcpy(in.gid, mgid, sizeof(*mgid));
in.qpn = cpu_to_be32(qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a051b906afdf..fc88ecaecb4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,6 +37,10 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "3.0-1"
+#define DRIVER_RELDATE "January 2015"
+
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(dev, format, ...) \
@@ -65,11 +69,20 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
+static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
+ int in_size, u32 *out,
+ int out_size)
+{
+ int err;
+
+ err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ if (err)
+ return err;
+
+ return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
+}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
- struct mlx5_caps *caps);
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
+int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 49e90f2612d8..619d3baf19ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -102,3 +102,235 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
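+/* The PTYS access register carries both the IB and the Ethernet protocol
+ * fields; proto_mask selects which set the caller is interested in.
+ */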
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(ptys_reg, in, local_port, local_port);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+ ptys_size, MLX5_REG_PTYS, 0, 0);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ else
+ *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ else
+ *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
+ if (err)
+ return err;
+
+ *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
+
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ else
+ *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+ if (proto_mask == MLX5_PTYS_EN)
+ MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+ else
+ MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(paos_reg, in, admin_status, status);
+ MLX5_SET(paos_reg, in, ase, 1);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 1);
+}
+
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 0);
+ if (err)
+ return err;
+
+ *status = MLX5_GET(paos_reg, out, oper_status);
+ return err;
+}
+
+static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
+ int *admin_mtu, int *max_mtu, int *oper_mtu,
+ u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, local_port, local_port);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 0);
+ if (err)
+ return err;
+
+ if (max_mtu)
+ *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
+ if (oper_mtu)
+ *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+ if (admin_mtu)
+ *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+
+ return 0;
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+ MLX5_SET(pmtu_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+ u8 local_port)
+{
+ return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, local_port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 local_port)
+{
+ return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, local_port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+
+static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+ int pvlc_size, u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pvlc_reg, in, local_port, local_port);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+ pvlc_size, MLX5_REG_PVLC, 0, 0);
+
+ return err;
+}
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
+ if (err)
+ return err;
+
+ *vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index dc7dbf7e9d98..8b494b562263 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_destroy_qp_mbox_in din;
struct mlx5_destroy_qp_mbox_out dout;
int err;
+ void *qpc;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+ if (dev->issi) {
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(qpc, qpc, user_index, 0xffffff);
+ }
+
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "ret %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f9d25dcd03c1..c48f504ccbeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
+#include "transobj.h"
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
@@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
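+/*
+ * Size in bytes of the PAS (physical address) array that accompanies the
+ * SRQ context: the RQ size plus the page offset, rounded up to whole
+ * pages, needs one 64-bit address per page.
+ */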
+static int get_pas_size(void *srqc)
+{
+ u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
+ u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
+ u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
+ u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
+ u32 po_quanta = 1 << (log_page_size - 6);
+ u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
+ u32 page_size = 1 << log_page_size;
+ u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
+ u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
+
+ return rq_num_pas * sizeof(u64);
+}
+
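+/*
+ * Translate between the legacy SRQ context and the RMP context used on
+ * ISSI >= 1 devices; both describe the same WQ parameters, only the
+ * state encodings differ.
+ */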
+static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+{
+ void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ if (srqc_to_rmpc) {
+ switch (MLX5_GET(srqc, srqc, state)) {
+ case MLX5_SRQC_STATE_GOOD:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ break;
+ case MLX5_SRQC_STATE_ERROR:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
+ __LINE__, MLX5_GET(srqc, srqc, state));
+ MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
+ }
+
+ MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
+ MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
+ MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
+ MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
+ MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
+ MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
+ MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
+ } else {
+ switch (MLX5_GET(rmpc, rmpc, state)) {
+ case MLX5_RMPC_STATE_RDY:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
+ break;
+ case MLX5_RMPC_STATE_ERR:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
+ __func__, __LINE__,
+ MLX5_GET(rmpc, rmpc, state));
+ MLX5_SET(srqc, srqc, state,
+ MLX5_GET(rmpc, rmpc, state));
+ }
+
+ MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
+ MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
+ MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
+ MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
+ MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
+ MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
+ MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
+ MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
+ }
+}
+
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
@@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
}
EXPORT_SYMBOL(mlx5_core_get_srq);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen)
+static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen)
{
struct mlx5_create_srq_mbox_out out;
- struct mlx5_srq_table *table = &dev->priv.srq_table;
- struct mlx5_destroy_srq_mbox_in din;
- struct mlx5_destroy_srq_mbox_out dout;
int err;
memset(&out, 0, sizeof(out));
+
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err)
- return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
+ sizeof(out));
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
+ return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ struct mlx5_destroy_srq_mbox_in in;
+ struct mlx5_destroy_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)(&out), sizeof(out));
+}
+
+static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ struct mlx5_arm_srq_mbox_in in;
+ struct mlx5_arm_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
+ in.hdr.opmod = cpu_to_be16(!!is_srq);
+ in.srqn = cpu_to_be32(srq->srqn);
+ in.lwm = cpu_to_be16(lwm);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
+ sizeof(in), (u32 *)(&out),
+ sizeof(out));
+}
+
+static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ struct mlx5_query_srq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)out, sizeof(*out));
+}
+
+static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int srq_inlen)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ void *create_in;
+ void *srqc;
+ void *xrc_srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+ xrc_srq_context_entry);
+ pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+ memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+ memcpy(pas, in->pas, pas_size);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
+ MLX5_SET(create_xrc_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(create_out, 0, sizeof(create_out));
+ err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+ sizeof(create_out));
+ if (err)
+ goto out;
+
+ srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+out:
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq, u16 lwm)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 *xrcsrq_out;
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!xrcsrq_out)
+ return -ENOMEM;
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (err)
+ goto out;
+
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+
+out:
+ kvfree(xrcsrq_out);
+ return err;
+}
+
+static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+{
+ void *create_in;
+ void *rmpc;
+ void *srqc;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+
+ memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+ rmpc_srqc_reformat(srqc, rmpc, true);
+
+ err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ return mlx5_core_destroy_rmp(dev, srq->srqn);
+}
+
+static int arm_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+ return err;
+}
+
+static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 *rmp_out;
+ void *rmpc;
+ void *srqc;
+ int err;
+
+ rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+ if (!rmp_out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+ if (err)
+ goto out;
+
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+ rmpc_srqc_reformat(srqc, rmpc, false);
+
+out:
+ kvfree(rmp_out);
+ return err;
+}
+
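+/*
+ * On ISSI 0 the firmware exposes the legacy SRQ commands; on ISSI >= 1
+ * an SRQ is backed either by an XRC SRQ or by an RMP, so dispatch on the
+ * resource type recorded at creation time.
+ */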
+static int create_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int inlen, int is_xrc)
+{
+ if (!dev->issi)
+ return create_srq_cmd(dev, srq, in, inlen);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return create_xrc_srq_cmd(dev, srq, in, inlen);
+ else
+ return create_rmp_cmd(dev, srq, in, inlen);
+}
+
+static int destroy_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ if (!dev->issi)
+ return destroy_srq_cmd(dev, srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return destroy_xrc_srq_cmd(dev, srq);
+ else
+ return destroy_rmp_cmd(dev, srq);
+}
+
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc)
+{
+ int err;
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+ srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+
+ err = create_srq_split(dev, srq, in, inlen, is_xrc);
+ if (err)
+ return err;
+
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
@@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
spin_unlock_irq(&table->lock);
if (err) {
mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
- goto err_cmd;
+ goto err_destroy_srq_split;
}
return 0;
-err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.srqn = cpu_to_be32(srq->srqn);
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+err_destroy_srq_split:
+ destroy_srq_split(dev, srq);
+
return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
- struct mlx5_destroy_srq_mbox_in in;
- struct mlx5_destroy_srq_mbox_out out;
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_core_srq *tmp;
int err;
@@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
return -EINVAL;
}
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ err = destroy_srq_split(dev, srq);
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
@@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
- struct mlx5_query_srq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, sizeof(*out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ if (!dev->issi)
+ return query_srq_cmd(dev, srq, out);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return query_xrc_srq_cmd(dev, srq, out);
+ else
+ return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- struct mlx5_arm_srq_mbox_in in;
- struct mlx5_arm_srq_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
- in.hdr.opmod = cpu_to_be16(!!is_srq);
- in.srqn = cpu_to_be32(srq->srqn);
- in.lwm = cpu_to_be16(lwm);
-
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ if (!dev->issi)
+ return arm_srq_cmd(dev, srq, lwm, is_srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return arm_xrc_srq_cmd(dev, srq, lwm);
+ else
+ return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
new file mode 100644
index 000000000000..7a120283de2c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "transobj.h"
+
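+/*
+ * Thin wrappers around the transport object commands (RQ/SQ/TIR/TIS/
+ * RMP/XRC SRQ): set the opcode, execute the command and, for create
+ * commands, pull the newly allocated object number out of the output
+ * mailbox.
+ */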
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+ int err;
+
+ MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqn = MLX5_GET(create_rq_out, out, rqn);
+
+ return err;
+}
+
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
+
+ MLX5_SET(modify_rq_in, in, rqn, rqn);
+ MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+ int err;
+
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *sqn = MLX5_GET(create_sq_out, out, sqn);
+
+ return err;
+}
+
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+ int err;
+
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+ int err;
+
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+ int err;
+
+ MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rmpn = MLX5_GET(create_rmp_out, out, rmpn);
+
+ return err;
+}
+
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+
+ MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
+ MLX5_SET(query_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *xsrqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ int err;
+
+ MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+
+ return err;
+}
+
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!err) {
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+ }
+
+ return err;
+}
+
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
new file mode 100644
index 000000000000..90322c11361f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+
+#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 5a89bb1d678a..9ef85873ceea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
for (i = 0; i < tot_uuars; i++) {
bf = &uuari->bfs[i];
- bf->buf_size = dev->caps.gen.bf_reg_size / 2;
+ bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
bf->reg = NULL; /* Add WC support */
- bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
- MLX5_BF_OFFSET;
+ bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+ (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
+ MLX5_BF_OFFSET;
bf->need_lock = need_uuar_lock(i);
spin_lock_init(&bf->lock);
spin_lock_init(&bf->lock32);
@@ -223,3 +224,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
return 0;
}
+
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ phys_addr_t pfn;
+ phys_addr_t uar_bar_start;
+ int err;
+
+ err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+ return err;
+ }
+
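+ /* UARs are page-sized slots in BAR 0; map the slot for this index */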
+ uar_bar_start = pci_resource_start(mdev->pdev, 0);
+ pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map) {
+ mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
+ err = -ENOMEM;
+ goto err_free_uar;
+ }
+
+ return 0;
+
+err_free_uar:
+ mlx5_cmd_free_uar(mdev, uar->index);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ iounmap(uar->map);
+ mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
new file mode 100644
index 000000000000..b94177ebcf3a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
+{
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+
+ return MLX5_GET(query_vport_state_out, out, state);
+}
+EXPORT_SYMBOL(mlx5_query_vport_state);
+
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u8 *out_addr;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+ out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context.permanent_address);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+
+ memset(out, 0, outlen);
+ mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+
+ ether_addr_copy(addr, &out_addr[2]);
+
+ kvfree(out);
+}
+EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);
+
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 gid_index,
+ union ib_gid *gid)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ union ib_gid *tmp;
+ int tbsz;
+ int nout;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+ tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
+ mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
+ vf_num, gid_index, tbsz);
+
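+ /* gid_index 0xffff requests a dump of the whole GID table */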
+ if (gid_index > tbsz && gid_index != 0xffff)
+ return -EINVAL;
+
+ if (gid_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * sizeof(*gid);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ gid->global.subnet_prefix = tmp->global.subnet_prefix;
+ gid->global.interface_id = tmp->global.interface_id;
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
+
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ void *pkarr;
+ int nout;
+ int tbsz;
+ int err;
+ int i;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
+ if (pkey_index > tbsz && pkey_index != 0xffff)
+ return -EINVAL;
+
+ if (pkey_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
+ for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
+ *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
+
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ u16 vf_num,
+ struct mlx5_hca_vport_context *rep)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+ int is_group_manager;
+ void *out;
+ void *ctx;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
+
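+ /* Reading another vport's context requires the group manager capability */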
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
+ } else {
+ err = -EPERM;
+ goto ex;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto ex;
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto ex;
+
+ ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
+ rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
+ rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
+ rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
+ rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
+ rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
+ rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
+ rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
+ rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
+ rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
+ rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask1_field_select);
+ rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
+ rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask2_field_select);
+ rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
+ rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
+ init_type_reply);
+ rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
+ rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
+ subnet_timeout);
+ rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
+ rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
+ rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ qkey_violation_counter);
+ rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ pkey_violation_counter);
+ rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
+ rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
+ system_image_guid);
+
+ex:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
+
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *sys_image_guid = rep->sys_image_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
+
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+ u64 *node_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *node_guid = rep->node_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
new file mode 100644
index 000000000000..8388411582cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "wq.h"
+#include "mlx5_core.h"
+
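+/*
+ * Three work queue flavors share the allocation pattern below: a cyclic
+ * WQ, a CQ WQ and a linked-list WQ. Each derives its buffer size from
+ * the stride and log size in the WQ/CQ context and allocates a doorbell
+ * record alongside the buffer.
+ */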
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+ return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+ return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+ return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+ return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+ wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ wq->sz_m1 = (1 << wq->log_sz) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ struct mlx5_wqe_srq_next_seg *next_seg;
+ int err;
+ int i;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ for (i = 0; i < wq->sz_m1; i++) {
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ next_seg->next_wqe_index = cpu_to_be16(i + 1);
+ }
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ wq->tail_next = &next_seg->next_wqe_index;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+ mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+ mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
new file mode 100644
index 000000000000..e0ddd69fb429
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+struct mlx5_wq_param {
+ int linear;
+ int numa;
+};
+
+struct mlx5_wq_ctrl {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_buf buf;
+ struct mlx5_db db;
+};
+
+struct mlx5_wq_cyc {
+ void *buf;
+ __be32 *db;
+ u16 sz_m1;
+ u8 log_stride;
+};
+
+struct mlx5_cqwq {
+ void *buf;
+ __be32 *db;
+ u32 sz_m1;
+ u32 cc; /* consumer counter */
+ u8 log_sz;
+ u8 log_stride;
+};
+
+struct mlx5_wq_ll {
+ void *buf;
+ __be32 *db;
+ __be16 *tail_next;
+ u16 sz_m1;
+ u16 head;
+ u16 wqe_ctr;
+ u16 cur_sz;
+ u8 log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+ return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
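+/*
+ * Compare two 16-bit counters modulo 2^16: cc1 is "bigger" when it leads
+ * cc2 by less than half the counter space, which stays correct across
+ * wraparound.
+ */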
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+ int equal = (cc1 == cc2);
+ int smaller = 0x8000 & (cc1 - cc2);
+
+ return !equal && !smaller;
+}
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+ return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+ return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+ wq->cc++;
+}
+
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+ *wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+ return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+ return !wq->cur_sz;
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
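+/*
+ * The linked-list WQ threads entries through next_wqe_index: push
+ * advances the head and the producer counter, pop links the completed
+ * entry back onto the tail of the free list.
+ */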
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+ wq->head = head_next;
+ wq->wqe_ctr++;
+ wq->cur_sz++;
+}
+
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+ __be16 *next_tail_next)
+{
+ *wq->tail_next = ix;
+ wq->tail_next = next_tail_next;
+ wq->cur_sz--;
+}
+
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+ *wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+#endif /* __MLX5_WQ_H__ */
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 6f332ebdf3b5..75dc46c5fca2 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6664,7 +6664,7 @@ static void mib_read_work(struct work_struct *work)
wake_up_interruptible(
&hw_priv->counter[i].counter);
}
- } else if (jiffies >= hw_priv->counter[i].time) {
+ } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
/* Only read MIB counters when the port is connected. */
if (media_connected == mib->state)
hw_priv->counter[i].read = 1;
@@ -6689,7 +6689,7 @@ static void mib_monitor(unsigned long ptr)
/* This is used to verify Wake-on-LAN is working. */
if (hw_priv->pme_wait) {
- if (hw_priv->pme_wait <= jiffies) {
+ if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
hw_clr_wol_pme_status(&hw_priv->hw);
hw_priv->pme_wait = 0;
}
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 81d0f1c86d6d..becbb5f1f5a7 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -244,7 +244,6 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
napi_gro_receive(&priv->napi, skb);
rx++;
- ndev->last_rx = jiffies;
priv->stats.rx_packets++;
priv->stats.rx_bytes += len;
if (desc0 & RX_DESC0_MULTICAST)
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 1e0f72b65459..c28111749e1f 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5308,7 +5308,8 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
/**
* s2io_ethtool_sset - Sets different link parameters.
- * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* @info: pointer to the structure with parameters given by ethtool to set
* link information.
* Description:
@@ -5793,7 +5794,8 @@ static void s2io_vpd_read(struct s2io_nic *nic)
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
- * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
* @data_buf : user defined value to be written into Eeprom.
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f221126a5c4e..055f3763e577 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1326,9 +1326,6 @@ struct qlcnic_eswitch {
};
-/* Return codes for Error handling */
-#define QL_STATUS_INVALID_PARAM -1
-
#define MAX_BW 100 /* % of link speed */
#define MIN_BW 1 /* % of link speed */
#define MAX_VLAN_ID 4095
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 367f3976df56..2f6cc423ab1d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1031,7 +1031,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
pfn = pci_info[i].id;
if (pfn >= ahw->max_vnic_func) {
- ret = QL_STATUS_INVALID_PARAM;
+ ret = -EINVAL;
dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
__func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 59a721fba018..05c28f2c6df7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -24,8 +24,6 @@
#include <linux/hwmon-sysfs.h>
#endif
-#define QLC_STATUS_UNSUPPORTED_CMD -2
-
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
@@ -166,7 +164,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
u8 b_state, b_rate;
if (len != sizeof(u16))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memcpy(&beacon, buf, sizeof(u16));
err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
@@ -383,17 +381,17 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
if (src_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
if (dest_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
s_esw_id = adapter->npars[src_index].phy_port;
d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
@@ -414,7 +412,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
@@ -427,7 +425,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
action = !!pm_cfg[i].action;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
ret = qlcnic_config_port_mirroring(adapter, id,
@@ -440,7 +438,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
@@ -499,11 +497,11 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -517,25 +515,25 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].mac_override != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].promisc_mode != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
break;
case QLCNIC_ADD_VLAN:
if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
case QLCNIC_DEL_VLAN:
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
default:
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
}
@@ -559,7 +557,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
@@ -570,7 +568,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
@@ -604,7 +602,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -654,7 +652,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
esw_cfg[pci_func].pci_func = pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
@@ -669,11 +667,11 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
!IS_VALID_BW(np_cfg[i].max_bw))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
}
@@ -694,7 +692,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
count = size / sizeof(struct qlcnic_npar_func_cfg);
rem = size % sizeof(struct qlcnic_npar_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
@@ -717,7 +715,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
@@ -784,13 +782,13 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&port_stats, 0, size);
ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -819,13 +817,13 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&esw_stats, 0, size);
ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -853,10 +851,10 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -883,10 +881,10 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -953,9 +951,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (!size)
- return QL_STATUS_INVALID_PARAM;
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
count = size / sizeof(u32);
@@ -1132,9 +1128,6 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
-
ret = kstrtoul(buf, 16, &data);
switch (data) {
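
One detail beyond the errno conversion: the "!buf" tests deleted from the two 83xx flash handlers above were dead code. These are sysfs binary-attribute callbacks, and the sysfs core always hands them a valid kernel buffer, so only the zero-size case needs rejecting. A sketch of the callback shape (names hypothetical):

    #include <linux/errno.h>
    #include <linux/kobject.h>
    #include <linux/string.h>
    #include <linux/sysfs.h>

    /* Binary-attribute read handler: buf is provided by the sysfs core
     * and is never NULL, so size is the only input worth validating.
     */
    static ssize_t example_flash_read(struct file *filp, struct kobject *kobj,
                                      struct bin_attribute *attr, char *buf,
                                      loff_t offset, size_t size)
    {
            if (!size)
                    return -EINVAL;
            memset(buf, 0, size);   /* a real handler copies flash data */
            return size;
    }
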
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 25800a1dedcb..02b7115b6aaa 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3871,9 +3871,6 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
return status;
}
- end_jiffies = jiffies +
- max((unsigned long)1, usecs_to_jiffies(30));
-
/* Check if bit is set then skip the mailbox command and
* clear the bit, else we are in normal reset process.
*/
@@ -3888,6 +3885,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+ end_jiffies = jiffies + usecs_to_jiffies(30);
do {
value = ql_read32(qdev, RST_FO);
if ((value & RST_FO_FR) == 0)
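
Two things change in ql_adapter_reset(): the 30 us deadline is now computed immediately before the RST_FO polling loop rather than before the potentially slow mailbox handling above it, so the poll gets its full budget; and the max((unsigned long)1, ...) wrapper is dropped because usecs_to_jiffies() already rounds up and returns at least one jiffy for a nonzero argument. The general shape of the pattern (stand-in register accessor, not the driver's exact loop):

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    #define EXAMPLE_RST_BIT BIT(0)

    /* Stand-in for the hardware read; pretend the reset bit clears on
     * the first poll so the sketch is self-contained.
     */
    static u32 example_read_reset_reg(void)
    {
            return 0;
    }

    static int example_wait_for_reset(void)
    {
            /* Take the deadline right before polling starts so earlier,
             * unrelated work cannot eat into the 30 us budget.
             */
            unsigned long end_jiffies = jiffies + usecs_to_jiffies(30);

            do {
                    if (!(example_read_reset_reg() & EXAMPLE_RST_BIT))
                            return 0;       /* reset complete */
            } while (time_before(jiffies, end_jiffies));

            return -ETIMEDOUT;
    }
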
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 6af028d5f9bc..2f87909f5186 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -839,7 +839,7 @@ static const struct of_device_id qca_spi_of_match[] = {
MODULE_DEVICE_TABLE(of, qca_spi_of_match);
static int
-qca_spi_probe(struct spi_device *spi_device)
+qca_spi_probe(struct spi_device *spi)
{
struct qcaspi *qca = NULL;
struct net_device *qcaspi_devs = NULL;
@@ -847,52 +847,52 @@ qca_spi_probe(struct spi_device *spi_device)
u16 signature;
const char *mac;
- if (!spi_device->dev.of_node) {
- dev_err(&spi_device->dev, "Missing device tree\n");
+ if (!spi->dev.of_node) {
+ dev_err(&spi->dev, "Missing device tree\n");
return -EINVAL;
}
- legacy_mode = of_property_read_bool(spi_device->dev.of_node,
+ legacy_mode = of_property_read_bool(spi->dev.of_node,
"qca,legacy-mode");
if (qcaspi_clkspeed == 0) {
- if (spi_device->max_speed_hz)
- qcaspi_clkspeed = spi_device->max_speed_hz;
+ if (spi->max_speed_hz)
+ qcaspi_clkspeed = spi->max_speed_hz;
else
qcaspi_clkspeed = QCASPI_CLK_SPEED;
}
if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
(qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_info(&spi_device->dev, "Invalid clkspeed: %d\n",
+ dev_info(&spi->dev, "Invalid clkspeed: %d\n",
qcaspi_clkspeed);
return -EINVAL;
}
if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
(qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
- dev_info(&spi_device->dev, "Invalid burst len: %d\n",
+ dev_info(&spi->dev, "Invalid burst len: %d\n",
qcaspi_burst_len);
return -EINVAL;
}
if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
(qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
- dev_info(&spi_device->dev, "Invalid pluggable: %d\n",
+ dev_info(&spi->dev, "Invalid pluggable: %d\n",
qcaspi_pluggable);
return -EINVAL;
}
- dev_info(&spi_device->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
+ dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
QCASPI_DRV_VERSION,
qcaspi_clkspeed,
qcaspi_burst_len,
qcaspi_pluggable);
- spi_device->mode = SPI_MODE_3;
- spi_device->max_speed_hz = qcaspi_clkspeed;
- if (spi_setup(spi_device) < 0) {
- dev_err(&spi_device->dev, "Unable to setup SPI device\n");
+ spi->mode = SPI_MODE_3;
+ spi->max_speed_hz = qcaspi_clkspeed;
+ if (spi_setup(spi) < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI device\n");
return -EFAULT;
}
@@ -905,23 +905,23 @@ qca_spi_probe(struct spi_device *spi_device)
qca = netdev_priv(qcaspi_devs);
if (!qca) {
free_netdev(qcaspi_devs);
- dev_err(&spi_device->dev, "Fail to retrieve private structure\n");
+ dev_err(&spi->dev, "Fail to retrieve private structure\n");
return -ENOMEM;
}
qca->net_dev = qcaspi_devs;
- qca->spi_dev = spi_device;
+ qca->spi_dev = spi;
qca->legacy_mode = legacy_mode;
- spi_set_drvdata(spi_device, qcaspi_devs);
+ spi_set_drvdata(spi, qcaspi_devs);
- mac = of_get_mac_address(spi_device->dev.of_node);
+ mac = of_get_mac_address(spi->dev.of_node);
if (mac)
ether_addr_copy(qca->net_dev->dev_addr, mac);
if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
eth_hw_addr_random(qca->net_dev);
- dev_info(&spi_device->dev, "Using random MAC address: %pM\n",
+ dev_info(&spi->dev, "Using random MAC address: %pM\n",
qca->net_dev->dev_addr);
}
@@ -932,7 +932,7 @@ qca_spi_probe(struct spi_device *spi_device)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
- dev_err(&spi_device->dev, "Invalid signature (0x%04X)\n",
+ dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
signature);
free_netdev(qcaspi_devs);
return -EFAULT;
@@ -940,7 +940,7 @@ qca_spi_probe(struct spi_device *spi_device)
}
if (register_netdev(qcaspi_devs)) {
- dev_info(&spi_device->dev, "Unable to register net device %s\n",
+ dev_info(&spi->dev, "Unable to register net device %s\n",
qcaspi_devs->name);
free_netdev(qcaspi_devs);
return -EFAULT;
@@ -952,9 +952,9 @@ qca_spi_probe(struct spi_device *spi_device)
}
static int
-qca_spi_remove(struct spi_device *spi_device)
+qca_spi_remove(struct spi_device *spi)
{
- struct net_device *qcaspi_devs = spi_get_drvdata(spi_device);
+ struct net_device *qcaspi_devs = spi_get_drvdata(spi);
struct qcaspi *qca = netdev_priv(qcaspi_devs);
qcaspi_remove_device_debugfs(qca);
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index cf98cc9bbc8d..819289e5be4f 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -181,7 +181,7 @@ struct rocker_desc_info {
size_t data_size;
size_t tlv_size;
struct rocker_desc *desc;
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ dma_addr_t mapaddr;
};
struct rocker_dma_ring_info {
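
The mapaddr member moves from the DEFINE_DMA_UNMAP_ADDR() helper to a plain dma_addr_t. The helper pair is conditional: when CONFIG_NEED_DMA_MAP_STATE is unset it declares no storage at all and dma_unmap_addr() reads back 0, whereas a plain dma_addr_t keeps the mapped address on every configuration. Abridged, the definitions in <linux/dma-mapping.h> reduce to:

    #ifdef CONFIG_NEED_DMA_MAP_STATE
    #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
    #define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
    #else
    #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
    #define dma_unmap_addr(PTR, ADDR_NAME)          (0)
    #endif
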
@@ -225,6 +225,7 @@ struct rocker_port {
struct napi_struct napi_rx;
struct rocker_dma_ring_info tx_ring;
struct rocker_dma_ring_info rx_ring;
+ struct list_head trans_mem;
};
struct rocker {
@@ -236,21 +237,21 @@ struct rocker {
struct {
u64 id;
} hw;
- spinlock_t cmd_ring_lock;
+ spinlock_t cmd_ring_lock; /* for cmd ring accesses */
struct rocker_dma_ring_info cmd_ring;
struct rocker_dma_ring_info event_ring;
DECLARE_HASHTABLE(flow_tbl, 16);
- spinlock_t flow_tbl_lock;
+ spinlock_t flow_tbl_lock; /* for flow tbl accesses */
u64 flow_tbl_next_cookie;
DECLARE_HASHTABLE(group_tbl, 16);
- spinlock_t group_tbl_lock;
+ spinlock_t group_tbl_lock; /* for group tbl accesses */
DECLARE_HASHTABLE(fdb_tbl, 16);
- spinlock_t fdb_tbl_lock;
+ spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
DECLARE_HASHTABLE(internal_vlan_tbl, 8);
- spinlock_t internal_vlan_tbl_lock;
+ spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
DECLARE_HASHTABLE(neigh_tbl, 16);
- spinlock_t neigh_tbl_lock;
+ spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
u32 neigh_tbl_next_index;
};
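
The added lock comments are small but deliberate: checkpatch.pl raises "CHECK: spinlock_t definition without comment" for uncommented lock members, and the kernel convention is to state what each lock protects next to its definition, e.g.:

    #include <linux/hashtable.h>
    #include <linux/spinlock.h>

    struct example_table {                  /* hypothetical */
            DECLARE_HASHTABLE(entries, 8);
            spinlock_t lock;                /* protects entries */
    };
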
@@ -294,7 +295,7 @@ static bool rocker_vlan_id_is_internal(__be16 vlan_id)
return (_vlan_id >= start && _vlan_id <= end);
}
-static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
+static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
u16 vid, bool *pop_vlan)
{
__be16 vlan_id;
@@ -311,7 +312,7 @@ static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
return vlan_id;
}
-static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
+static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
__be16 vlan_id)
{
if (rocker_vlan_id_is_internal(vlan_id))
@@ -320,21 +321,87 @@ static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
return ntohs(vlan_id);
}
-static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
+static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
return !!rocker_port->bridge_dev;
}
+static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, size_t size)
+{
+ struct list_head *elem = NULL;
+
+ /* If in transaction prepare phase, allocate the memory
+ * and enqueue it on a per-port list. If in transaction
+ * commit phase, dequeue the memory from the per-port list
+ * rather than re-allocating the memory. The idea is the
+ * driver code paths for prepare and commit are identical
+ * so the memory allocated in the prepare phase is the
+ * memory used in the commit phase.
+ */
+
+ switch (trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return NULL;
+ list_add_tail(elem, &rocker_port->trans_mem);
+ break;
+ case SWITCHDEV_TRANS_COMMIT:
+ BUG_ON(list_empty(&rocker_port->trans_mem));
+ elem = rocker_port->trans_mem.next;
+ list_del_init(elem);
+ break;
+ case SWITCHDEV_TRANS_NONE:
+ elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
+ if (elem)
+ INIT_LIST_HEAD(elem);
+ break;
+ default:
+ break;
+ }
+
+ return elem ? elem + 1 : NULL;
+}
+
+static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, size_t size)
+{
+ return __rocker_port_mem_alloc(rocker_port, trans, size);
+}
+
+static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, size_t n,
+ size_t size)
+{
+ return __rocker_port_mem_alloc(rocker_port, trans, n * size);
+}
+
+static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
+{
+ struct list_head *elem;
+
+ /* Frees are ignored if in transaction prepare phase. The
+ * memory remains on the per-port list until freed in the
+ * commit phase.
+ */
+
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ return;
+
+ elem = (struct list_head *)mem - 1;
+ BUG_ON(!list_empty(elem));
+ kfree(elem);
+}
+
struct rocker_wait {
wait_queue_head_t wait;
bool done;
- bool nowait;
};
static void rocker_wait_reset(struct rocker_wait *wait)
{
wait->done = false;
- wait->nowait = false;
}
static void rocker_wait_init(struct rocker_wait *wait)
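
The __rocker_port_mem_alloc()/rocker_port_kfree() pair above is the heart of the transaction support: a PREPARE-phase allocation really allocates and parks the struct list_head header it hides in front of the payload on the port's trans_mem list; the matching COMMIT-phase call dequeues that same memory instead of allocating, so once prepare has succeeded the commit path cannot fail with -ENOMEM. Frees are deferred symmetrically: rocker_port_kfree() is a no-op during PREPARE, and its BUG_ON catches an element freed while still queued. Callers get back elem + 1, so the header is recovered by stepping one list_head backwards. A stripped-down model of a caller (the entry type here is hypothetical; the helpers are the ones added above):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_entry {
            u32 key;
            u32 val;
    };

    /* In the real driver the same function body runs twice, once per
     * phase; both runs take the same branches, which is what makes the
     * prepare-time allocation line up with the commit-time dequeue.
     */
    static int example_entry_add(struct rocker_port *rocker_port,
                                 enum switchdev_trans trans, u32 key, u32 val)
    {
            struct example_entry *e;

            /* PREPARE: kzalloc + enqueue on trans_mem.
             * COMMIT:  dequeue of the prepare-phase memory; cannot fail.
             * NONE:    ordinary standalone allocation.
             */
            e = rocker_port_kzalloc(rocker_port, trans, sizeof(*e));
            if (!e)
                    return -ENOMEM;

            e->key = key;
            e->val = val;

            /* ... program the hardware here ... */

            rocker_port_kfree(trans, e);    /* ignored during PREPARE */
            return 0;
    }
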
@@ -343,20 +410,22 @@ static void rocker_wait_init(struct rocker_wait *wait)
rocker_wait_reset(wait);
}
-static struct rocker_wait *rocker_wait_create(gfp_t gfp)
+static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
struct rocker_wait *wait;
- wait = kmalloc(sizeof(*wait), gfp);
+ wait = rocker_port_kzalloc(rocker_port, trans, sizeof(*wait));
if (!wait)
return NULL;
rocker_wait_init(wait);
return wait;
}
-static void rocker_wait_destroy(struct rocker_wait *work)
+static void rocker_wait_destroy(enum switchdev_trans trans,
+ struct rocker_wait *wait)
{
- kfree(work);
+ rocker_port_kfree(trans, wait);
}
static bool rocker_wait_event_timeout(struct rocker_wait *wait,
@@ -374,18 +443,18 @@ static void rocker_wait_wake_up(struct rocker_wait *wait)
wake_up(&wait->wait);
}
-static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
+static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
return rocker->msix_entries[vector].vector;
}
-static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
return rocker_msix_vector(rocker_port->rocker,
ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}
-static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
return rocker_msix_vector(rocker_port->rocker,
ROCKER_MSIX_VEC_RX(rocker_port->port_number));
@@ -404,9 +473,9 @@ static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
* HW basic testing functions
*****************************/
-static int rocker_reg_test(struct rocker *rocker)
+static int rocker_reg_test(const struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
u64 test_reg;
u64 rnd;
@@ -434,12 +503,12 @@ static int rocker_reg_test(struct rocker *rocker)
return 0;
}
-static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
- u32 test_type, dma_addr_t dma_handle,
- unsigned char *buf, unsigned char *expect,
- size_t size)
+static int rocker_dma_test_one(const struct rocker *rocker,
+ struct rocker_wait *wait, u32 test_type,
+ dma_addr_t dma_handle, const unsigned char *buf,
+ const unsigned char *expect, size_t size)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
int i;
rocker_wait_reset(wait);
@@ -463,7 +532,7 @@ static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
-static int rocker_dma_test_offset(struct rocker *rocker,
+static int rocker_dma_test_offset(const struct rocker *rocker,
struct rocker_wait *wait, int offset)
{
struct pci_dev *pdev = rocker->pdev;
@@ -523,7 +592,8 @@ free_alloc:
return err;
}
-static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
+static int rocker_dma_test(const struct rocker *rocker,
+ struct rocker_wait *wait)
{
int i;
int err;
@@ -545,9 +615,9 @@ static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int rocker_basic_hw_test(struct rocker *rocker)
+static int rocker_basic_hw_test(const struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
struct rocker_wait wait;
int err;
@@ -680,7 +750,7 @@ static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
return *(u64 *) rocker_tlv_data(tlv);
}
-static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
const char *buf, int buf_len)
{
const struct rocker_tlv *tlv;
@@ -693,19 +763,19 @@ static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
u32 type = rocker_tlv_type(tlv);
if (type > 0 && type <= maxtype)
- tb[type] = (struct rocker_tlv *) tlv;
+ tb[type] = tlv;
}
}
-static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
const struct rocker_tlv *tlv)
{
rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
rocker_tlv_len(tlv));
}
-static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
- struct rocker_desc_info *desc_info)
+static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
+ const struct rocker_desc_info *desc_info)
{
rocker_tlv_parse(tb, maxtype, desc_info->data,
desc_info->desc->tlv_size);
@@ -790,9 +860,9 @@ static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
}
static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
- struct rocker_tlv *start)
+ const struct rocker_tlv *start)
{
- desc_info->tlv_size = (char *) start - desc_info->data;
+ desc_info->tlv_size = (const char *) start - desc_info->data;
}
/******************************************
@@ -804,7 +874,7 @@ static u32 __pos_inc(u32 pos, size_t limit)
return ++pos == limit ? 0 : pos;
}
-static int rocker_desc_err(struct rocker_desc_info *desc_info)
+static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
@@ -832,31 +902,31 @@ static int rocker_desc_err(struct rocker_desc_info *desc_info)
return -EINVAL;
}
-static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
+static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}
-static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
+static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
u32 comp_err = desc_info->desc->comp_err;
return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}
-static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
return (void *)(uintptr_t)desc_info->desc->cookie;
}
-static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
void *ptr)
{
desc_info->desc->cookie = (uintptr_t) ptr;
}
static struct rocker_desc_info *
-rocker_desc_head_get(struct rocker_dma_ring_info *info)
+rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
static struct rocker_desc_info *desc_info;
u32 head = __pos_inc(info->head, info->size);
@@ -868,15 +938,15 @@ rocker_desc_head_get(struct rocker_dma_ring_info *info)
return desc_info;
}
-static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
desc_info->desc->buf_size = desc_info->data_size;
desc_info->desc->tlv_size = desc_info->tlv_size;
}
-static void rocker_desc_head_set(struct rocker *rocker,
+static void rocker_desc_head_set(const struct rocker *rocker,
struct rocker_dma_ring_info *info,
- struct rocker_desc_info *desc_info)
+ const struct rocker_desc_info *desc_info)
{
u32 head = __pos_inc(info->head, info->size);
@@ -901,8 +971,8 @@ rocker_desc_tail_get(struct rocker_dma_ring_info *info)
return desc_info;
}
-static void rocker_dma_ring_credits_set(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_credits_set(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
u32 credits)
{
if (credits)
@@ -915,7 +985,7 @@ static unsigned long rocker_dma_ring_size_fix(size_t size)
min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
-static int rocker_dma_ring_create(struct rocker *rocker,
+static int rocker_dma_ring_create(const struct rocker *rocker,
unsigned int type,
size_t size,
struct rocker_dma_ring_info *info)
@@ -951,8 +1021,8 @@ static int rocker_dma_ring_create(struct rocker *rocker,
return 0;
}
-static void rocker_dma_ring_destroy(struct rocker *rocker,
- struct rocker_dma_ring_info *info)
+static void rocker_dma_ring_destroy(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info)
{
rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
@@ -962,7 +1032,7 @@ static void rocker_dma_ring_destroy(struct rocker *rocker,
kfree(info->desc_info);
}
-static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
struct rocker_dma_ring_info *info)
{
int i;
@@ -977,8 +1047,8 @@ static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
rocker_desc_commit(&info->desc_info[i]);
}
-static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
int direction, size_t buf_size)
{
struct pci_dev *pdev = rocker->pdev;
@@ -1015,7 +1085,7 @@ static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
rollback:
for (i--; i >= 0; i--) {
- struct rocker_desc_info *desc_info = &info->desc_info[i];
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
@@ -1024,15 +1094,15 @@ rollback:
return err;
}
-static void rocker_dma_ring_bufs_free(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
int direction)
{
struct pci_dev *pdev = rocker->pdev;
int i;
for (i = 0; i < info->size; i++) {
- struct rocker_desc_info *desc_info = &info->desc_info[i];
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
struct rocker_desc *desc = &info->desc[i];
desc->buf_addr = 0;
@@ -1045,7 +1115,7 @@ static void rocker_dma_ring_bufs_free(struct rocker *rocker,
static int rocker_dma_rings_init(struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
int err;
err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
@@ -1102,11 +1172,11 @@ static void rocker_dma_rings_fini(struct rocker *rocker)
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
-static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
struct sk_buff *skb, size_t buf_len)
{
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
@@ -1126,13 +1196,12 @@ tlv_put_failure:
return -EMSGSIZE;
}
-static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
+static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
-static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info)
{
struct net_device *dev = rocker_port->dev;
@@ -1149,8 +1218,7 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
skb = netdev_alloc_skb_ip_align(dev, buf_len);
if (!skb)
return -ENOMEM;
- err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
- skb, buf_len);
+ err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
if (err) {
dev_kfree_skb_any(skb);
return err;
@@ -1159,8 +1227,8 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
return 0;
}
-static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
- struct rocker_tlv **attrs)
+static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
+ const struct rocker_tlv **attrs)
{
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
@@ -1174,10 +1242,10 @@ static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}
-static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
- struct rocker_desc_info *desc_info)
+static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
if (!skb)
@@ -1187,15 +1255,15 @@ static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
dev_kfree_skb_any(skb);
}
-static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
- struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
int i;
int err;
for (i = 0; i < rx_ring->size; i++) {
- err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
+ err = rocker_dma_rx_ring_skb_alloc(rocker_port,
&rx_ring->desc_info[i]);
if (err)
goto rollback;
@@ -1208,10 +1276,10 @@ rollback:
return err;
}
-static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
- struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
int i;
for (i = 0; i < rx_ring->size; i++)
@@ -1257,7 +1325,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
goto err_dma_rx_ring_bufs_alloc;
}
- err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+ err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
goto err_dma_rx_ring_skbs_alloc;
@@ -1283,7 +1351,7 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
struct rocker *rocker = rocker_port->rocker;
- rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+ rocker_dma_rx_ring_skbs_free(rocker_port);
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
PCI_DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
@@ -1292,7 +1360,8 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
-static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+static void rocker_port_set_enable(const struct rocker_port *rocker_port,
+ bool enable)
{
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
@@ -1310,19 +1379,14 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
struct rocker *rocker = dev_id;
- struct rocker_desc_info *desc_info;
+ const struct rocker_desc_info *desc_info;
struct rocker_wait *wait;
u32 credits = 0;
spin_lock(&rocker->cmd_ring_lock);
while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
wait = rocker_desc_cookie_ptr_get(desc_info);
- if (wait->nowait) {
- rocker_desc_gen_clear(desc_info);
- rocker_wait_destroy(wait);
- } else {
- rocker_wait_wake_up(wait);
- }
+ rocker_wait_wake_up(wait);
credits++;
}
spin_unlock(&rocker->cmd_ring_lock);
@@ -1331,22 +1395,22 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rocker_port_link_up(struct rocker_port *rocker_port)
+static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
netif_carrier_on(rocker_port->dev);
netdev_info(rocker_port->dev, "Link is up\n");
}
-static void rocker_port_link_down(struct rocker_port *rocker_port)
+static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
netif_carrier_off(rocker_port->dev);
netdev_info(rocker_port->dev, "Link is down\n");
}
-static int rocker_event_link_change(struct rocker *rocker,
+static int rocker_event_link_change(const struct rocker *rocker,
const struct rocker_tlv *info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
unsigned int port_number;
bool link_up;
struct rocker_port *rocker_port;
@@ -1374,22 +1438,44 @@ static int rocker_event_link_change(struct rocker *rocker,
}
#define ROCKER_OP_FLAG_REMOVE BIT(0)
-#define ROCKER_OP_FLAG_NOWAIT BIT(1)
-#define ROCKER_OP_FLAG_LEARNED BIT(2)
-#define ROCKER_OP_FLAG_REFRESH BIT(3)
+#define ROCKER_OP_FLAG_LEARNED BIT(1)
+#define ROCKER_OP_FLAG_REFRESH BIT(2)
static int rocker_port_fdb(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
const unsigned char *addr,
__be16 vlan_id, int flags);
-static int rocker_event_mac_vlan_seen(struct rocker *rocker,
+struct rocker_mac_vlan_seen_work {
+ struct work_struct work;
+ struct rocker_port *rocker_port;
+ int flags;
+ unsigned char addr[ETH_ALEN];
+ __be16 vlan_id;
+};
+
+static void rocker_event_mac_vlan_seen_work(struct work_struct *work)
+{
+ const struct rocker_mac_vlan_seen_work *sw =
+ container_of(work, struct rocker_mac_vlan_seen_work, work);
+
+ rtnl_lock();
+ rocker_port_fdb(sw->rocker_port, SWITCHDEV_TRANS_NONE,
+ sw->addr, sw->vlan_id, sw->flags);
+ rtnl_unlock();
+
+ kfree(work);
+}
+
+static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
const struct rocker_tlv *info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+ struct rocker_mac_vlan_seen_work *sw;
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
unsigned int port_number;
struct rocker_port *rocker_port;
- unsigned char *addr;
- int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
+ const unsigned char *addr;
+ int flags = ROCKER_OP_FLAG_LEARNED;
__be16 vlan_id;
rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
@@ -1411,14 +1497,27 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker,
rocker_port->stp_state != BR_STATE_FORWARDING)
return 0;
- return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+ sw = kmalloc(sizeof(*sw), GFP_ATOMIC);
+ if (!sw)
+ return -ENOMEM;
+
+ INIT_WORK(&sw->work, rocker_event_mac_vlan_seen_work);
+
+ sw->rocker_port = rocker_port;
+ sw->flags = flags;
+ ether_addr_copy(sw->addr, addr);
+ sw->vlan_id = vlan_id;
+
+ schedule_work(&sw->work);
+
+ return 0;
}
-static int rocker_event_process(struct rocker *rocker,
- struct rocker_desc_info *desc_info)
+static int rocker_event_process(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
- struct rocker_tlv *info;
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+ const struct rocker_tlv *info;
u16 type;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
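
The rocker_event_mac_vlan_seen() rework above follows from dropping ROCKER_OP_FLAG_NOWAIT: the "MAC/VLAN seen" event arrives in the event-ring interrupt handler, i.e. atomic context, but rocker_port_fdb() may now sleep (GFP_KERNEL allocation, waiting on the command ring), so the learning update is captured in a small GFP_ATOMIC work item and finished in process context under RTNL, matching the locking context of the driver's other FDB update paths. The generic shape of that deferral (names hypothetical):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/rtnetlink.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct example_event_work {
            struct work_struct work;
            int data;
    };

    static void example_event_fn(struct work_struct *work)
    {
            struct example_event_work *ew =
                    container_of(work, struct example_event_work, work);

            rtnl_lock();
            /* ... may sleep: GFP_KERNEL allocations, waiting on hw ... */
            rtnl_unlock();
            kfree(ew);
    }

    static int example_handle_irq_event(int data)
    {
            struct example_event_work *ew;

            ew = kmalloc(sizeof(*ew), GFP_ATOMIC);  /* atomic context */
            if (!ew)
                    return -ENOMEM;
            INIT_WORK(&ew->work, example_event_fn);
            ew->data = data;
            schedule_work(&ew->work);
            return 0;
    }
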
@@ -1442,8 +1541,8 @@ static int rocker_event_process(struct rocker *rocker,
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
struct rocker *rocker = dev_id;
- struct pci_dev *pdev = rocker->pdev;
- struct rocker_desc_info *desc_info;
+ const struct pci_dev *pdev = rocker->pdev;
+ const struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -1487,65 +1586,70 @@ static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
* Command interface
********************/
-typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv);
+typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv);
+
+typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv);
-static int rocker_cmd_exec(struct rocker *rocker,
- struct rocker_port *rocker_port,
- rocker_cmd_cb_t prepare, void *prepare_priv,
- rocker_cmd_cb_t process, void *process_priv,
- bool nowait)
+static int rocker_cmd_exec(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ rocker_cmd_prep_cb_t prepare, void *prepare_priv,
+ rocker_cmd_proc_cb_t process, void *process_priv)
{
+ struct rocker *rocker = rocker_port->rocker;
struct rocker_desc_info *desc_info;
struct rocker_wait *wait;
unsigned long flags;
int err;
- wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
+ wait = rocker_wait_create(rocker_port, trans);
if (!wait)
return -ENOMEM;
- wait->nowait = nowait;
spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
+
desc_info = rocker_desc_head_get(&rocker->cmd_ring);
if (!desc_info) {
spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
err = -EAGAIN;
goto out;
}
- err = prepare(rocker, rocker_port, desc_info, prepare_priv);
+
+ err = prepare(rocker_port, desc_info, prepare_priv);
if (err) {
spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
goto out;
}
+
rocker_desc_cookie_ptr_set(desc_info, wait);
- rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
- if (nowait)
- return 0;
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
- if (!rocker_wait_event_timeout(wait, HZ / 10))
- return -EIO;
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!rocker_wait_event_timeout(wait, HZ / 10))
+ return -EIO;
err = rocker_desc_err(desc_info);
if (err)
return err;
if (process)
- err = process(rocker, rocker_port, desc_info, process_priv);
+ err = process(rocker_port, desc_info, process_priv);
rocker_desc_gen_clear(desc_info);
out:
- rocker_wait_destroy(wait);
+ rocker_wait_destroy(trans, wait);
return err;
}
static int
-rocker_cmd_get_port_settings_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
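
rocker_cmd_exec(), reworked above, keys its behaviour off the transaction phase instead of the old nowait flag: during PREPARE the command descriptor is built but rocker_desc_head_set() is skipped, so nothing is posted to hardware and there is nothing to wait for; during COMMIT and NONE the descriptor is posted and the caller sleeps up to HZ/10 for the completion interrupt. Combined with the trans_mem allocator, a prepare run exercises the full call chain, allocations included, without side effects. A hedged sketch of an upper-level op driven through both phases (function name hypothetical; the helpers are from the hunks in this patch):

    #include <linux/errno.h>

    static int example_port_obj_add(struct rocker_port *rocker_port,
                                    enum switchdev_trans trans)
    {
            struct rocker_flow_tbl_entry *entry;

            entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
            if (!entry)
                    return -ENOMEM; /* reachable in PREPARE or NONE only */

            /* ... fill in entry->key ... */

            /* In PREPARE this validates and reserves; in COMMIT it posts
             * the already-reserved work to the hardware.
             */
            return rocker_flow_tbl_add(rocker_port, trans, entry);
    }
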
@@ -1565,14 +1669,13 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
struct ethtool_cmd *ecmd = priv;
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
u32 speed;
u8 duplex;
u8 autoneg;
@@ -1604,15 +1707,14 @@ rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
unsigned char *macaddr = priv;
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- struct rocker_tlv *attr;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attr;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1637,17 +1739,16 @@ struct port_name {
};
static int
-rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
struct port_name *name = priv;
- struct rocker_tlv *attr;
+ const struct rocker_tlv *attr;
size_t i, j, len;
- char *str;
+ const char *str;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1679,8 +1780,7 @@ rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1710,12 +1810,11 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- unsigned char *macaddr = priv;
+ const unsigned char *macaddr = priv;
struct rocker_tlv *cmd_info;
if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1735,8 +1834,7 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_learning_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1761,46 +1859,48 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker,
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_ethtool_proc,
- ecmd, false);
+ ecmd);
}
static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_macaddr_proc,
- macaddr, false);
+ macaddr);
}
static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_set_port_settings_ethtool_prep,
- ecmd, NULL, NULL, false);
+ ecmd, NULL, NULL);
}
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_set_port_settings_macaddr_prep,
- macaddr, NULL, NULL, false);
+ macaddr, NULL, NULL);
}
-static int rocker_port_set_learning(struct rocker_port *rocker_port)
+static int rocker_port_set_learning(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, trans,
rocker_cmd_set_port_learning_prep,
- NULL, NULL, NULL, false);
+ NULL, NULL, NULL);
}
-static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.ig_port.in_pport))
@@ -1815,8 +1915,9 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.vlan.in_pport))
@@ -1838,8 +1939,9 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.term_mac.in_pport))
@@ -1875,7 +1977,7 @@ static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
entry->key.ucast_routing.eth_type))
@@ -1896,8 +1998,9 @@ rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (entry->key.bridge.has_eth_dst &&
rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
@@ -1929,8 +2032,9 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.acl.in_pport))
@@ -1995,12 +2099,11 @@ static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_flow_tbl_entry *entry = priv;
+ const struct rocker_flow_tbl_entry *entry = priv;
struct rocker_tlv *cmd_info;
int err = 0;
@@ -2053,8 +2156,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
return 0;
}
-static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2090,7 +2192,7 @@ rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
entry->l2_rewrite.group_id))
@@ -2113,7 +2215,7 @@ rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
int i;
struct rocker_tlv *group_ids;
@@ -2139,7 +2241,7 @@ rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
@@ -2163,8 +2265,7 @@ rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_group_tbl_add(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2209,8 +2310,7 @@ static int rocker_cmd_group_tbl_add(struct rocker *rocker,
return 0;
}
-static int rocker_cmd_group_tbl_del(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2293,7 +2393,8 @@ static void rocker_free_tbls(struct rocker *rocker)
}
static struct rocker_flow_tbl_entry *
-rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
+rocker_flow_tbl_find(const struct rocker *rocker,
+ const struct rocker_flow_tbl_entry *match)
{
struct rocker_flow_tbl_entry *found;
size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
@@ -2308,8 +2409,8 @@ rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
}
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
- struct rocker_flow_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans,
+ struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
@@ -2324,8 +2425,9 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
if (found) {
match->cookie = found->cookie;
- hash_del(&found->entry);
- kfree(found);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
+ rocker_port_kfree(trans, found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
} else {
@@ -2334,18 +2436,18 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
}
- hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
- return rocker_cmd_exec(rocker, rocker_port,
- rocker_cmd_flow_tbl_add,
- found, NULL, NULL, nowait);
+ return rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_add,
+ found, NULL, NULL);
}
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
- struct rocker_flow_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans,
+ struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
@@ -2360,47 +2462,43 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
found = rocker_flow_tbl_find(rocker, match);
if (found) {
- hash_del(&found->entry);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
}
spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
- kfree(match);
+ rocker_port_kfree(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker, rocker_port,
+ err = rocker_cmd_exec(rocker_port, trans,
rocker_cmd_flow_tbl_del,
- found, NULL, NULL, nowait);
- kfree(found);
+ found, NULL, NULL);
+ rocker_port_kfree(trans, found);
}
return err;
}
-static gfp_t rocker_op_flags_gfp(int flags)
-{
- return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
-}
-
static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
- int flags, struct rocker_flow_tbl_entry *entry)
+ enum switchdev_trans trans, int flags,
+ struct rocker_flow_tbl_entry *entry)
{
- bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_flow_tbl_del(rocker_port, entry, nowait);
+ return rocker_flow_tbl_del(rocker_port, trans, entry);
else
- return rocker_flow_tbl_add(rocker_port, entry, nowait);
+ return rocker_flow_tbl_add(rocker_port, trans, entry);
}
static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
- int flags, u32 in_pport, u32 in_pport_mask,
+ enum switchdev_trans trans, int flags,
+ u32 in_pport, u32 in_pport_mask,
enum rocker_of_dpa_table_id goto_tbl)
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2410,18 +2508,19 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
entry->key.ig_port.in_pport_mask = in_pport_mask;
entry->key.ig_port.goto_tbl = goto_tbl;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
- int flags, u32 in_pport,
- __be16 vlan_id, __be16 vlan_id_mask,
+ enum switchdev_trans trans, int flags,
+ u32 in_pport, __be16 vlan_id,
+ __be16 vlan_id_mask,
enum rocker_of_dpa_table_id goto_tbl,
bool untagged, __be16 new_vlan_id)
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2435,10 +2534,11 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
entry->key.vlan.untagged = untagged;
entry->key.vlan.new_vlan_id = new_vlan_id;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
u32 in_pport, u32 in_pport_mask,
__be16 eth_type, const u8 *eth_dst,
const u8 *eth_dst_mask, __be16 vlan_id,
@@ -2447,7 +2547,7 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2471,11 +2571,11 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
entry->key.term_mac.vlan_id_mask = vlan_id_mask;
entry->key.term_mac.copy_to_cpu = copy_to_cpu;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
- int flags,
+ enum switchdev_trans trans, int flags,
const u8 *eth_dst, const u8 *eth_dst_mask,
__be16 vlan_id, u32 tunnel_id,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2487,7 +2587,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
bool wild = false;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2500,7 +2600,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
if (eth_dst_mask) {
entry->key.bridge.has_eth_dst_mask = 1;
ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
- if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
+ if (!ether_addr_equal(eth_dst_mask, ff_mac))
wild = true;
}
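
Here and in the ACL hunk below, open-coded memcmp(a, b, ETH_ALEN) comparisons become ether_addr_equal(), the <linux/etherdevice.h> helper that compares exactly six bytes and returns true on a match; note its kernel-doc requires both addresses to be u16-aligned, which stack- and struct-embedded MAC arrays normally satisfy. So memcmp(...) != 0 becomes !ether_addr_equal(...) and memcmp(...) == 0 becomes ether_addr_equal(...):

    #include <linux/etherdevice.h>

    static const u8 example_ff_mac[ETH_ALEN] = {
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff
    };

    /* true when the mask selects the entire address, i.e. the memcmp()
     * in the old code returned 0 (hypothetical helper).
     */
    static bool example_mask_is_exact(const u8 *eth_dst_mask)
    {
            return ether_addr_equal(eth_dst_mask, example_ff_mac);
    }
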
@@ -2525,10 +2625,11 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
entry->key.bridge.group_id = group_id;
entry->key.bridge.copy_to_cpu = copy_to_cpu;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2536,7 +2637,7 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2550,30 +2651,29 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
entry->key_len = offsetof(struct rocker_flow_tbl_key,
ucast_routing.group_id);
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
- int flags, u32 in_pport,
- u32 in_pport_mask,
+ enum switchdev_trans trans, int flags,
+ u32 in_pport, u32 in_pport_mask,
const u8 *eth_src, const u8 *eth_src_mask,
const u8 *eth_dst, const u8 *eth_dst_mask,
- __be16 eth_type,
- __be16 vlan_id, __be16 vlan_id_mask,
- u8 ip_proto, u8 ip_proto_mask,
- u8 ip_tos, u8 ip_tos_mask,
+ __be16 eth_type, __be16 vlan_id,
+ __be16 vlan_id_mask, u8 ip_proto,
+ u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
u32 group_id)
{
u32 priority;
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
priority = ROCKER_PRIORITY_ACL_NORMAL;
if (eth_dst && eth_dst_mask) {
- if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
+ if (ether_addr_equal(eth_dst_mask, mcast_mac))
priority = ROCKER_PRIORITY_ACL_DFLT;
else if (is_link_local_ether_addr(eth_dst))
priority = ROCKER_PRIORITY_ACL_CTRL;
@@ -2602,12 +2702,12 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
entry->key.acl.ip_tos_mask = ip_tos_mask;
entry->key.acl.group_id = group_id;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static struct rocker_group_tbl_entry *
-rocker_group_tbl_find(struct rocker *rocker,
- struct rocker_group_tbl_entry *match)
+rocker_group_tbl_find(const struct rocker *rocker,
+ const struct rocker_group_tbl_entry *match)
{
struct rocker_group_tbl_entry *found;
@@ -2620,22 +2720,23 @@ rocker_group_tbl_find(struct rocker *rocker,
return NULL;
}
-static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
+static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
+ struct rocker_group_tbl_entry *entry)
{
switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
- kfree(entry->group_ids);
+ rocker_port_kfree(trans, entry->group_ids);
break;
default:
break;
}
- kfree(entry);
+ rocker_port_kfree(trans, entry);
}
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
- struct rocker_group_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans,
+ struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_group_tbl_entry *found;
@@ -2646,8 +2747,9 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
found = rocker_group_tbl_find(rocker, match);
if (found) {
- hash_del(&found->entry);
- rocker_group_tbl_entry_free(found);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
+ rocker_group_tbl_entry_free(trans, found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
} else {
@@ -2655,18 +2757,18 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
}
- hash_add(rocker->group_tbl, &found->entry, found->group_id);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_add(rocker->group_tbl, &found->entry, found->group_id);
spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
- return rocker_cmd_exec(rocker, rocker_port,
- rocker_cmd_group_tbl_add,
- found, NULL, NULL, nowait);
+ return rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_add,
+ found, NULL, NULL);
}
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
- struct rocker_group_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans,
+ struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_group_tbl_entry *found;
@@ -2678,93 +2780,95 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port,
found = rocker_group_tbl_find(rocker, match);
if (found) {
- hash_del(&found->entry);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
}
spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
- rocker_group_tbl_entry_free(match);
+ rocker_group_tbl_entry_free(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker, rocker_port,
+ err = rocker_cmd_exec(rocker_port, trans,
rocker_cmd_group_tbl_del,
- found, NULL, NULL, nowait);
- rocker_group_tbl_entry_free(found);
+ found, NULL, NULL);
+ rocker_group_tbl_entry_free(trans, found);
}
return err;
}
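
In both group-table paths above, hash_add()/hash_del() are now skipped when `trans == SWITCHDEV_TRANS_PREPARE`: the prepare phase may allocate and validate, but it must leave shared driver state untouched so an abort has nothing to unwind. A compilable sketch of the guard, with made-up names:

	#include <stdio.h>

	enum trans_phase { TRANS_NONE, TRANS_PREPARE, TRANS_COMMIT };

	static int table_count;	/* stand-in for the shared group hash table */

	static void table_insert(enum trans_phase phase, int key)
	{
		if (phase == TRANS_PREPARE)
			return;		/* dry run: leave shared state untouched */
		table_count++;		/* hash_add() would go here */
		printf("inserted key %d, count=%d\n", key, table_count);
	}

	int main(void)
	{
		table_insert(TRANS_PREPARE, 7);	/* count stays 0 */
		table_insert(TRANS_COMMIT, 7);	/* count becomes 1 */
		return 0;
	}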
static int rocker_group_tbl_do(struct rocker_port *rocker_port,
- int flags, struct rocker_group_tbl_entry *entry)
+ enum switchdev_trans trans, int flags,
+ struct rocker_group_tbl_entry *entry)
{
- bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_group_tbl_del(rocker_port, entry, nowait);
+ return rocker_group_tbl_del(rocker_port, trans, entry);
else
- return rocker_group_tbl_add(rocker_port, entry, nowait);
+ return rocker_group_tbl_add(rocker_port, trans, entry);
}
static int rocker_group_l2_interface(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id,
- u32 out_pport, int pop_vlan)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id, u32 out_pport,
+ int pop_vlan)
{
struct rocker_group_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
entry->l2_interface.pop_vlan = pop_vlan;
- return rocker_group_tbl_do(rocker_port, flags, entry);
+ return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
int flags, u8 group_count,
- u32 *group_ids, u32 group_id)
+ const u32 *group_ids, u32 group_id)
{
struct rocker_group_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
entry->group_id = group_id;
entry->group_count = group_count;
- entry->group_ids = kcalloc(group_count, sizeof(u32),
- rocker_op_flags_gfp(flags));
+ entry->group_ids = rocker_port_kcalloc(rocker_port, trans, group_count,
+ sizeof(u32));
if (!entry->group_ids) {
- kfree(entry);
+ rocker_port_kfree(trans, entry);
return -ENOMEM;
}
memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
- return rocker_group_tbl_do(rocker_port, flags, entry);
+ return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_group_l2_flood(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id,
- u8 group_count, u32 *group_ids,
- u32 group_id)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id, u8 group_count,
+ const u32 *group_ids, u32 group_id)
{
- return rocker_group_l2_fan_out(rocker_port, flags,
+ return rocker_group_l2_fan_out(rocker_port, trans, flags,
group_count, group_ids,
group_id);
}
static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
- int flags, u32 index, u8 *src_mac,
- u8 *dst_mac, __be16 vlan_id,
- bool ttl_check, u32 pport)
+ enum switchdev_trans trans, int flags,
+ u32 index, const u8 *src_mac, const u8 *dst_mac,
+ __be16 vlan_id, bool ttl_check, u32 pport)
{
struct rocker_group_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2777,11 +2881,11 @@ static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
entry->l3_unicast.ttl_check = ttl_check;
entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
- return rocker_group_tbl_do(rocker_port, flags, entry);
+ return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static struct rocker_neigh_tbl_entry *
- rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
struct rocker_neigh_tbl_entry *found;
@@ -2794,37 +2898,44 @@ static struct rocker_neigh_tbl_entry *
}
static void _rocker_neigh_add(struct rocker *rocker,
+ enum switchdev_trans trans,
struct rocker_neigh_tbl_entry *entry)
{
- entry->index = rocker->neigh_tbl_next_index++;
+ entry->index = rocker->neigh_tbl_next_index;
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ return;
+ rocker->neigh_tbl_next_index++;
entry->ref_count++;
hash_add(rocker->neigh_tbl, &entry->entry,
be32_to_cpu(entry->ip_addr));
}
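
_rocker_neigh_add() shows the subtler side of the prepare phase: the entry still gets a tentative index, since callers need one to build the rest of the transaction, but the global counter, refcount and hash insertion only happen on commit, so an aborted prepare leaves neigh_tbl_next_index unchanged. A sketch under those assumptions (names are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	static int next_index = 1;	/* stand-in for neigh_tbl_next_index */

	struct neigh { int index; int ref_count; };

	static void neigh_add(bool preparing, struct neigh *e)
	{
		e->index = next_index;	/* tentative: visible to this caller only */
		if (preparing)
			return;		/* commit will redo this for real */
		next_index++;
		e->ref_count++;
		/* hash_add(...) would go here in the real driver */
	}

	int main(void)
	{
		struct neigh a = { 0 }, b = { 0 };

		neigh_add(true, &a);	/* prepare: next_index still 1 */
		neigh_add(false, &b);	/* commit: next_index becomes 2 */
		printf("a.index=%d b.index=%d next=%d\n",
		       a.index, b.index, next_index);
		return 0;
	}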
-static void _rocker_neigh_del(struct rocker *rocker,
+static void _rocker_neigh_del(enum switchdev_trans trans,
struct rocker_neigh_tbl_entry *entry)
{
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ return;
if (--entry->ref_count == 0) {
hash_del(&entry->entry);
- kfree(entry);
+ rocker_port_kfree(trans, entry);
}
}
-static void _rocker_neigh_update(struct rocker *rocker,
- struct rocker_neigh_tbl_entry *entry,
- u8 *eth_dst, bool ttl_check)
+static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
+ enum switchdev_trans trans,
+ const u8 *eth_dst, bool ttl_check)
{
if (eth_dst) {
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = ttl_check;
- } else {
+ } else if (trans != SWITCHDEV_TRANS_PREPARE) {
entry->ref_count++;
}
}
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
- int flags, __be32 ip_addr, u8 *eth_dst)
+ enum switchdev_trans trans,
+ int flags, __be32 ip_addr, const u8 *eth_dst)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_neigh_tbl_entry *entry;
@@ -2840,7 +2951,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2857,12 +2968,12 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
entry->dev = rocker_port->dev;
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = true;
- _rocker_neigh_add(rocker, entry);
+ _rocker_neigh_add(rocker, trans, entry);
} else if (removing) {
memcpy(entry, found, sizeof(*entry));
- _rocker_neigh_del(rocker, found);
+ _rocker_neigh_del(trans, found);
} else if (updating) {
- _rocker_neigh_update(rocker, found, eth_dst, true);
+ _rocker_neigh_update(found, trans, eth_dst, true);
memcpy(entry, found, sizeof(*entry));
} else {
err = -ENOENT;
@@ -2879,7 +2990,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
* other routes' nexthops.
*/
- err = rocker_group_l3_unicast(rocker_port, flags,
+ err = rocker_group_l3_unicast(rocker_port, trans, flags,
entry->index,
rocker_port->dev->dev_addr,
entry->eth_dst,
@@ -2895,7 +3006,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
if (adding || removing) {
group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
- err = rocker_flow_tbl_ucast4_routing(rocker_port,
+ err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
eth_type, ip_addr,
inet_make_mask(32),
priority, goto_tbl,
@@ -2909,13 +3020,13 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
err_out:
if (!adding)
- kfree(entry);
+ rocker_port_kfree(trans, entry);
return err;
}
static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
- __be32 ip_addr)
+ enum switchdev_trans trans, __be32 ip_addr)
{
struct net_device *dev = rocker_port->dev;
struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
@@ -2933,7 +3044,8 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
*/
if (n->nud_state & NUD_VALID)
- err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+ err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
+ ip_addr, n->ha);
else
neigh_event_send(n, NULL);
@@ -2941,7 +3053,8 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
__be32 ip_addr, u32 *index)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2954,7 +3067,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
bool resolved = true;
int err = 0;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2971,13 +3084,13 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
if (adding) {
entry->ip_addr = ip_addr;
entry->dev = rocker_port->dev;
- _rocker_neigh_add(rocker, entry);
+ _rocker_neigh_add(rocker, trans, entry);
*index = entry->index;
resolved = false;
} else if (removing) {
- _rocker_neigh_del(rocker, found);
+ _rocker_neigh_del(trans, found);
} else if (updating) {
- _rocker_neigh_update(rocker, found, NULL, false);
+ _rocker_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
} else {
err = -ENOENT;
@@ -2986,7 +3099,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
if (!adding)
- kfree(entry);
+ rocker_port_kfree(trans, entry);
if (err)
return err;
@@ -2994,24 +3107,25 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
/* Resolved means neigh ip_addr is resolved to neigh mac. */
if (!resolved)
- err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+ err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
return err;
}
static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
int flags, __be16 vlan_id)
{
struct rocker_port *p;
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
u32 *group_ids;
u8 group_count = 0;
int err = 0;
int i;
- group_ids = kcalloc(rocker->port_count, sizeof(u32),
- rocker_op_flags_gfp(flags));
+ group_ids = rocker_port_kcalloc(rocker_port, trans, rocker->port_count,
+ sizeof(u32));
if (!group_ids)
return -ENOMEM;
@@ -3022,6 +3136,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
for (i = 0; i < rocker->port_count; i++) {
p = rocker->ports[i];
+ if (!p)
+ continue;
if (!rocker_port_is_bridged(p))
continue;
if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
@@ -3034,23 +3150,22 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
if (group_count == 0)
goto no_ports_in_vlan;
- err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
- group_count, group_ids,
- group_id);
+ err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
+ group_count, group_ids, group_id);
if (err)
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 flood group\n", err);
no_ports_in_vlan:
- kfree(group_ids);
+ rocker_port_kfree(trans, group_ids);
return err;
}
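
The new `if (!p) continue;` guard pairs with the kmalloc-to-kzalloc change to the rocker->ports array later in this diff: ports are probed one at a time, so while probing is still in progress the array legitimately contains NULL slots that the flood-group walk must skip. The same fix appears as `if (p && test_bit(...))` in rocker_port_vlan_l2_groups() just below. A sketch of the safe walk (hypothetical types):

	#include <stdio.h>
	#include <stdlib.h>

	struct port { int pport; };

	int main(void)
	{
		/* calloc models the kzalloc'd ports array: all slots NULL. */
		int port_count = 4;
		struct port **ports = calloc(port_count, sizeof(*ports));
		struct port p1 = { 2 };
		int i;

		ports[1] = &p1;		/* only one port probed so far */

		for (i = 0; i < port_count; i++) {
			if (!ports[i])
				continue;	/* unprobed slot: skip, don't deref */
			printf("port %d present\n", ports[i]->pport);
		}
		free(ports);
		return 0;
	}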
static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id,
- bool pop_vlan)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id, bool pop_vlan)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct rocker_port *p;
bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
u32 out_pport;
@@ -3065,9 +3180,8 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
if (rocker_port->stp_state == BR_STATE_LEARNING ||
rocker_port->stp_state == BR_STATE_FORWARDING) {
out_pport = rocker_port->pport;
- err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_pport,
- pop_vlan);
+ err = rocker_group_l2_interface(rocker_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 group for pport %d\n",
@@ -3083,7 +3197,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
for (i = 0; i < rocker->port_count; i++) {
p = rocker->ports[i];
- if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
+ if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
ref++;
}
@@ -3091,9 +3205,8 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
return 0;
out_pport = 0;
- err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_pport,
- pop_vlan);
+ err = rocker_group_l2_interface(rocker_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 group for CPU port\n", err);
@@ -3149,14 +3262,14 @@ static struct rocker_ctrl {
};
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
- int flags, struct rocker_ctrl *ctrl,
- __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport = rocker_port->pport;
u32 in_pport_mask = 0xffffffff;
u32 out_pport = 0;
- u8 *eth_src = NULL;
- u8 *eth_src_mask = NULL;
+ const u8 *eth_src = NULL;
+ const u8 *eth_src_mask = NULL;
__be16 vlan_id_mask = htons(0xffff);
u8 ip_proto = 0;
u8 ip_proto_mask = 0;
@@ -3165,7 +3278,7 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
int err;
- err = rocker_flow_tbl_acl(rocker_port, flags,
+ err = rocker_flow_tbl_acl(rocker_port, trans, flags,
in_pport, in_pport_mask,
eth_src, eth_src_mask,
ctrl->eth_dst, ctrl->eth_dst_mask,
@@ -3182,7 +3295,8 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
- int flags, struct rocker_ctrl *ctrl,
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl,
__be16 vlan_id)
{
enum rocker_of_dpa_table_id goto_tbl =
@@ -3194,7 +3308,7 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
if (!rocker_port_is_bridged(rocker_port))
return 0;
- err = rocker_flow_tbl_bridge(rocker_port, flags,
+ err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
ctrl->eth_dst, ctrl->eth_dst_mask,
vlan_id, tunnel_id,
goto_tbl, group_id, ctrl->copy_to_cpu);
@@ -3206,8 +3320,8 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
- int flags, struct rocker_ctrl *ctrl,
- __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
__be16 vlan_id_mask = htons(0xffff);
@@ -3216,7 +3330,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
if (ntohs(vlan_id) == 0)
vlan_id = rocker_port->internal_vlan_id;
- err = rocker_flow_tbl_term_mac(rocker_port,
+ err = rocker_flow_tbl_term_mac(rocker_port, trans,
rocker_port->pport, in_pport_mask,
ctrl->eth_type, ctrl->eth_dst,
ctrl->eth_dst_mask, vlan_id,
@@ -3229,32 +3343,34 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
- struct rocker_ctrl *ctrl, __be16 vlan_id)
+static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
if (ctrl->acl)
- return rocker_port_ctrl_vlan_acl(rocker_port, flags,
+ return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
ctrl, vlan_id);
if (ctrl->bridge)
- return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
+ return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
ctrl, vlan_id);
if (ctrl->term)
- return rocker_port_ctrl_vlan_term(rocker_port, flags,
+ return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
ctrl, vlan_id);
return -EOPNOTSUPP;
}
static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id)
{
int err = 0;
int i;
for (i = 0; i < ROCKER_CTRL_MAX; i++) {
if (rocker_port->ctrls[i]) {
- err = rocker_port_ctrl_vlan(rocker_port, flags,
+ err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
&rocker_ctrls[i], vlan_id);
if (err)
return err;
@@ -3264,8 +3380,9 @@ static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
- struct rocker_ctrl *ctrl)
+static int rocker_port_ctrl(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl)
{
u16 vid;
int err = 0;
@@ -3273,7 +3390,7 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
for (vid = 1; vid < VLAN_N_VID; vid++) {
if (!test_bit(vid, rocker_port->vlan_bitmap))
continue;
- err = rocker_port_ctrl_vlan(rocker_port, flags,
+ err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
ctrl, htons(vid));
if (err)
break;
@@ -3282,8 +3399,8 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
return err;
}
-static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
- u16 vid)
+static int rocker_port_vlan(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags, u16 vid)
{
enum rocker_of_dpa_table_id goto_tbl =
ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -3297,50 +3414,57 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
- if (adding && test_and_set_bit(ntohs(internal_vlan_id),
- rocker_port->vlan_bitmap))
+ if (adding && test_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
return 0; /* already added */
- else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
- rocker_port->vlan_bitmap))
+ else if (!adding && !test_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
return 0; /* already removed */
+ change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
if (adding) {
- err = rocker_port_ctrl_vlan_add(rocker_port, flags,
+ err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
internal_vlan_id);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port ctrl vlan add\n", err);
- return err;
+ goto err_out;
}
}
- err = rocker_port_vlan_l2_groups(rocker_port, flags,
+ err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
internal_vlan_id, untagged);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 groups\n", err);
- return err;
+ goto err_out;
}
- err = rocker_port_vlan_flood_group(rocker_port, flags,
+ err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
internal_vlan_id);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 flood group\n", err);
- return err;
+ goto err_out;
}
- err = rocker_flow_tbl_vlan(rocker_port, flags,
+ err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
in_pport, vlan_id, vlan_id_mask,
goto_tbl, untagged, internal_vlan_id);
if (err)
netdev_err(rocker_port->dev,
"Error (%d) port VLAN table\n", err);
+err_out:
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
return err;
}
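
rocker_port_vlan() trades test_and_set_bit()/test_and_clear_bit() for a read-only test_bit() followed by a single change_bit(): adding flips the bit on and removing flips it off, so one change_bit() covers both directions, and the err_out path can flip it back when the operation was only a prepare. A sketch of the toggle-and-rollback idea, using a plain word instead of the kernel bitmap API:

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long vlan_bitmap;	/* one word is enough here */

	static int port_vlan(bool preparing, bool adding, int vid)
	{
		bool set = vlan_bitmap & (1UL << vid);

		if (adding && set)
			return 0;	/* already added */
		if (!adding && !set)
			return 0;	/* already removed */

		vlan_bitmap ^= 1UL << vid;	/* change_bit(): flip either way */

		/* ... program flow/group tables here ... */

		if (preparing)
			vlan_bitmap ^= 1UL << vid;	/* undo the dry-run flip */
		return 0;
	}

	int main(void)
	{
		port_vlan(true, true, 5);	/* prepare: bitmap unchanged */
		printf("after prepare: %lx\n", vlan_bitmap);
		port_vlan(false, true, 5);	/* commit: bit 5 now set */
		printf("after commit:  %lx\n", vlan_bitmap);
		return 0;
	}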
-static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
+static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags)
{
enum rocker_of_dpa_table_id goto_tbl;
u32 in_pport;
@@ -3355,7 +3479,7 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
in_pport_mask = 0xffff0000;
goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
- err = rocker_flow_tbl_ig_port(rocker_port, flags,
+ err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
in_pport, in_pport_mask,
goto_tbl);
if (err)
@@ -3367,7 +3491,8 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
struct rocker_fdb_learn_work {
struct work_struct work;
- struct net_device *dev;
+ struct rocker_port *rocker_port;
+ enum switchdev_trans trans;
int flags;
u8 addr[ETH_ALEN];
u16 vid;
@@ -3375,27 +3500,28 @@ struct rocker_fdb_learn_work {
static void rocker_port_fdb_learn_work(struct work_struct *work)
{
- struct rocker_fdb_learn_work *lw =
+ const struct rocker_fdb_learn_work *lw =
container_of(work, struct rocker_fdb_learn_work, work);
bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
- struct netdev_switch_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info;
info.addr = lw->addr;
info.vid = lw->vid;
if (learned && removing)
- call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
- lw->dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+ lw->rocker_port->dev, &info.info);
else if (learned && !removing)
- call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
- lw->dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+ lw->rocker_port->dev, &info.info);
- kfree(work);
+ rocker_port_kfree(lw->trans, work);
}
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
- int flags, const u8 *addr, __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ const u8 *addr, __be16 vlan_id)
{
struct rocker_fdb_learn_work *lw;
enum rocker_of_dpa_table_id goto_tbl =
@@ -3411,8 +3537,8 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
- err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
- vlan_id, tunnel_id, goto_tbl,
+ err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
+ NULL, vlan_id, tunnel_id, goto_tbl,
group_id, copy_to_cpu);
if (err)
return err;
@@ -3424,24 +3550,29 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
if (!rocker_port_is_bridged(rocker_port))
return 0;
- lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
+ lw = rocker_port_kzalloc(rocker_port, trans, sizeof(*lw));
if (!lw)
return -ENOMEM;
INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
- lw->dev = rocker_port->dev;
+ lw->rocker_port = rocker_port;
+ lw->trans = trans;
lw->flags = flags;
ether_addr_copy(lw->addr, addr);
lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
- schedule_work(&lw->work);
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ rocker_port_kfree(trans, lw);
+ else
+ schedule_work(&lw->work);
return 0;
}
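
Two things happen to the FDB learn work above: it now carries the rocker_port and the transaction instead of a bare net_device (and its allocation becomes a zeroing, transaction-aware one), and a prepare-phase caller frees the work item instead of scheduling it, because a dry run must not emit switchdev FDB notifications. A sketch of the schedule-or-discard decision, with hypothetical names and a function pointer standing in for the workqueue:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct learn_work {
		void (*fn)(struct learn_work *w);
		int vid;
	};

	static void learn_fn(struct learn_work *w)
	{
		printf("notify FDB change, vid %d\n", w->vid);	/* commit only */
		free(w);
	}

	static int fdb_learn(bool preparing, int vid)
	{
		struct learn_work *w = calloc(1, sizeof(*w));

		if (!w)
			return -1;	/* -ENOMEM in the real driver */
		w->fn = learn_fn;
		w->vid = vid;

		if (preparing)
			free(w);	/* allocation checked, nothing scheduled */
		else
			w->fn(w);	/* schedule_work() in the real driver */
		return 0;
	}

	int main(void)
	{
		fdb_learn(true, 10);	/* silent */
		fdb_learn(false, 10);	/* prints the notification */
		return 0;
	}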
static struct rocker_fdb_tbl_entry *
-rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
+rocker_fdb_tbl_find(const struct rocker *rocker,
+ const struct rocker_fdb_tbl_entry *match)
{
struct rocker_fdb_tbl_entry *found;
@@ -3453,6 +3584,7 @@ rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
}
static int rocker_port_fdb(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
const unsigned char *addr,
__be16 vlan_id, int flags)
{
@@ -3462,7 +3594,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
unsigned long lock_flags;
- fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
+ fdb = rocker_port_kzalloc(rocker_port, trans, sizeof(*fdb));
if (!fdb)
return -ENOMEM;
@@ -3477,32 +3609,35 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
found = rocker_fdb_tbl_find(rocker, fdb);
if (removing && found) {
- kfree(fdb);
- hash_del(&found->entry);
+ rocker_port_kfree(trans, fdb);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
} else if (!removing && !found) {
- hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
}
spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
if (!found != !removing) {
- kfree(fdb);
+ rocker_port_kfree(trans, fdb);
if (!found && removing)
return 0;
/* Refreshing existing to update aging timers */
flags |= ROCKER_OP_FLAG_REFRESH;
}
- return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
+ return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}
-static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
+static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_fdb_tbl_entry *found;
unsigned long lock_flags;
- int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+ int flags = ROCKER_OP_FLAG_REMOVE;
struct hlist_node *tmp;
int bkt;
int err = 0;
@@ -3518,12 +3653,13 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
continue;
if (!found->learned)
continue;
- err = rocker_port_fdb_learn(rocker_port, flags,
+ err = rocker_port_fdb_learn(rocker_port, trans, flags,
found->key.addr,
found->key.vlan_id);
if (err)
goto err_out;
- hash_del(&found->entry);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
}
err_out:
@@ -3533,7 +3669,8 @@ err_out:
}
static int rocker_port_router_mac(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
__be16 eth_type;
@@ -3546,7 +3683,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
vlan_id = rocker_port->internal_vlan_id;
eth_type = htons(ETH_P_IP);
- err = rocker_flow_tbl_term_mac(rocker_port,
+ err = rocker_flow_tbl_term_mac(rocker_port, trans,
rocker_port->pport, in_pport_mask,
eth_type, rocker_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3555,7 +3692,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
return err;
eth_type = htons(ETH_P_IPV6);
- err = rocker_flow_tbl_term_mac(rocker_port,
+ err = rocker_flow_tbl_term_mac(rocker_port, trans,
rocker_port->pport, in_pport_mask,
eth_type, rocker_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3564,13 +3701,14 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_fwding(struct rocker_port *rocker_port)
+static int rocker_port_fwding(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
bool pop_vlan;
u32 out_pport;
__be16 vlan_id;
u16 vid;
- int flags = ROCKER_OP_FLAG_NOWAIT;
+ int flags = 0;
int err;
/* Port will be forwarding-enabled if its STP state is LEARNING
@@ -3590,9 +3728,8 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
continue;
vlan_id = htons(vid);
pop_vlan = rocker_vlan_id_is_internal(vlan_id);
- err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_pport,
- pop_vlan);
+ err = rocker_group_l2_interface(rocker_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 group for pport %d\n",
@@ -3604,13 +3741,21 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
return 0;
}
-static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
+static int rocker_port_stp_update(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, u8 state)
{
bool want[ROCKER_CTRL_MAX] = { 0, };
+ bool prev_ctrls[ROCKER_CTRL_MAX];
+ u8 prev_state;
int flags;
int err;
int i;
+ if (trans == SWITCHDEV_TRANS_PREPARE) {
+ memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
+ prev_state = rocker_port->stp_state;
+ }
+
if (rocker_port->stp_state == state)
return 0;
@@ -3638,45 +3783,54 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
for (i = 0; i < ROCKER_CTRL_MAX; i++) {
if (want[i] != rocker_port->ctrls[i]) {
- flags = ROCKER_OP_FLAG_NOWAIT |
- (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
- err = rocker_port_ctrl(rocker_port, flags,
+ flags = (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
+ err = rocker_port_ctrl(rocker_port, trans, flags,
&rocker_ctrls[i]);
if (err)
- return err;
+ goto err_out;
rocker_port->ctrls[i] = want[i];
}
}
- err = rocker_port_fdb_flush(rocker_port);
+ err = rocker_port_fdb_flush(rocker_port, trans);
if (err)
- return err;
+ goto err_out;
+
+ err = rocker_port_fwding(rocker_port, trans);
- return rocker_port_fwding(rocker_port);
+err_out:
+ if (trans == SWITCHDEV_TRANS_PREPARE) {
+ memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
+ rocker_port->stp_state = prev_state;
+ }
+
+ return err;
}
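
rocker_port_stp_update() cannot avoid touching per-port state, since ctrls[] and stp_state feed the decisions it makes, so for PREPARE it snapshots both up front and restores them on the way out; the error paths are also rerouted through err_out so the restore always runs. A sketch of that snapshot-and-restore pattern:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define CTRL_MAX 4

	struct port {
		bool ctrls[CTRL_MAX];
		int stp_state;
	};

	static int stp_update(struct port *p, bool preparing, int state)
	{
		bool prev_ctrls[CTRL_MAX];
		int prev_state = p->stp_state;
		int err = 0;

		if (preparing)
			memcpy(prev_ctrls, p->ctrls, sizeof(prev_ctrls));

		p->stp_state = state;
		p->ctrls[0] = true;	/* stand-in for the want[] recalculation */

		/* ... program ctrl/fdb/fwding tables; failures set err ... */

		if (preparing) {	/* dry run: put everything back */
			memcpy(p->ctrls, prev_ctrls, sizeof(prev_ctrls));
			p->stp_state = prev_state;
		}
		return err;
	}

	int main(void)
	{
		struct port p = { .stp_state = 0 };

		stp_update(&p, true, 3);
		printf("after prepare: state=%d ctrl0=%d\n",
		       p.stp_state, p.ctrls[0]);
		return 0;
	}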
-static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will enable port */
return 0;
/* port is not bridged, so simulate going to FORWARDING state */
- return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
+ return rocker_port_stp_update(rocker_port, trans, BR_STATE_FORWARDING);
}
-static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will disable port */
return 0;
/* port is not bridged, so simulate going to DISABLED state */
- return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+ return rocker_port_stp_update(rocker_port, trans, BR_STATE_DISABLED);
}
static struct rocker_internal_vlan_tbl_entry *
-rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
+rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
struct rocker_internal_vlan_tbl_entry *found;
@@ -3731,8 +3885,9 @@ found:
return found->vlan_id;
}
-static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
- int ifindex)
+static void
+rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
+ int ifindex)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_internal_vlan_tbl_entry *found;
@@ -3760,11 +3915,12 @@ not_found:
spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
-static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
- int dst_len, struct fib_info *fi, u32 tb_id,
- int flags)
+static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, __be32 dst,
+ int dst_len, const struct fib_info *fi,
+ u32 tb_id, int flags)
{
- struct fib_nh *nh;
+ const struct fib_nh *nh;
__be16 eth_type = htons(ETH_P_IP);
__be32 dst_mask = inet_make_mask(dst_len);
__be16 internal_vlan_id = rocker_port->internal_vlan_id;
@@ -3784,7 +3940,7 @@ static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
has_gw = !!nh->nh_gw;
if (has_gw && nh_on_port) {
- err = rocker_port_ipv4_nh(rocker_port, flags,
+ err = rocker_port_ipv4_nh(rocker_port, trans, flags,
nh->nh_gw, &index);
if (err)
return err;
@@ -3795,7 +3951,7 @@ static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
}
- err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
+ err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
dst_mask, priority, goto_tbl,
group_id, flags);
if (err)
@@ -3834,7 +3990,7 @@ static int rocker_port_open(struct net_device *dev)
goto err_request_rx_irq;
}
- err = rocker_port_fwd_enable(rocker_port);
+ err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);
if (err)
goto err_fwd_enable;
@@ -3861,7 +4017,7 @@ static int rocker_port_stop(struct net_device *dev)
rocker_port_set_enable(rocker_port, false);
napi_disable(&rocker_port->napi_rx);
napi_disable(&rocker_port->napi_tx);
- rocker_port_fwd_disable(rocker_port);
+ rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE);
free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
rocker_port_dma_rings_fini(rocker_port);
@@ -3869,12 +4025,12 @@ static int rocker_port_stop(struct net_device *dev)
return 0;
}
-static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info)
+static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
- struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
struct rocker_tlv *attr;
int rem;
@@ -3882,7 +4038,7 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
if (!attrs[ROCKER_TLV_TX_FRAGS])
return;
rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
- struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+ const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
dma_addr_t dma_handle;
size_t len;
@@ -3899,11 +4055,11 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
}
}
-static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
+static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
char *buf, size_t buf_len)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
struct rocker_tlv *frag;
@@ -4008,269 +4164,333 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
- __be16 proto, u16 vid)
+static int rocker_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
{
struct rocker_port *rocker_port = netdev_priv(dev);
+ struct port_name name = { .buf = buf, .len = len };
int err;
- err = rocker_port_vlan(rocker_port, 0, vid);
- if (err)
- return err;
+ err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_phys_name_proc,
+ &name);
- return rocker_port_router_mac(rocker_port, 0, htons(vid));
+ return err ? -EOPNOTSUPP : 0;
}
-static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
- __be16 proto, u16 vid)
+static const struct net_device_ops rocker_port_netdev_ops = {
+ .ndo_open = rocker_port_open,
+ .ndo_stop = rocker_port_stop,
+ .ndo_start_xmit = rocker_port_xmit,
+ .ndo_set_mac_address = rocker_port_set_mac_address,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
+ .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
+};
+
+/********************
+ * swdev interface
+ ********************/
+
+static int rocker_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int err;
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct rocker *rocker = rocker_port->rocker;
- err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
- htons(vid));
- if (err)
- return err;
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(rocker->hw.id);
+ memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
+ break;
+ case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+ attr->u.brport_flags = rocker_port->brport_flags;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
+ return 0;
}
-static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid,
- u16 nlm_flags)
+static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
- int flags = 0;
+ struct list_head *mem, *tmp;
- if (!rocker_port_is_bridged(rocker_port))
- return -EINVAL;
-
- return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+ list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
+ list_del(mem);
+ kfree(mem);
+ }
}
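
rocker_port_trans_abort() is the other half of the rocker_port_kzalloc()/rocker_port_kcalloc() calls used throughout this diff: during PREPARE each allocation is also linked onto the port's trans_mem list, so an ABORT can walk the list and free everything in one pass, while a successful commit simply reuses the memory. A compilable sketch of that allocator, assuming an embedded list header precedes each payload (all names illustrative, not the driver's API):

	#include <stdio.h>
	#include <stdlib.h>

	struct trans_mem {
		struct trans_mem *next;	/* models the list_head */
		/* payload follows the header */
	};

	static struct trans_mem *trans_list;

	static void *trans_kzalloc(int preparing, size_t size)
	{
		struct trans_mem *m = calloc(1, sizeof(*m) + size);

		if (!m)
			return NULL;
		if (preparing) {	/* track it so an abort can reclaim it */
			m->next = trans_list;
			trans_list = m;
		}
		return m + 1;		/* caller sees only the payload */
	}

	static void trans_abort(void)
	{
		while (trans_list) {	/* free every prepare-phase allocation */
			struct trans_mem *m = trans_list;

			trans_list = m->next;
			free(m);
		}
	}

	int main(void)
	{
		void *a = trans_kzalloc(1, 32);
		void *b = trans_kzalloc(1, 64);

		printf("prepared %p and %p\n", a, b);
		trans_abort();		/* ABORT: both are reclaimed */
		return 0;
	}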
-static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ unsigned long brport_flags)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
- int flags = ROCKER_OP_FLAG_REMOVE;
+ unsigned long orig_flags;
+ int err = 0;
- if (!rocker_port_is_bridged(rocker_port))
- return -EINVAL;
+ orig_flags = rocker_port->brport_flags;
+ rocker_port->brport_flags = brport_flags;
+ if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
+ err = rocker_port_set_learning(rocker_port, trans);
- return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ rocker_port->brport_flags = orig_flags;
+
+ return err;
}
-static int rocker_fdb_fill_info(struct sk_buff *skb,
- struct rocker_port *rocker_port,
- const unsigned char *addr, u16 vid,
- u32 portid, u32 seq, int type,
- unsigned int flags)
+static int rocker_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr)
{
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
- nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
- if (!nlh)
- return -EMSGSIZE;
+ switch (attr->trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ BUG_ON(!list_empty(&rocker_port->trans_mem));
+ break;
+ case SWITCHDEV_TRANS_ABORT:
+ rocker_port_trans_abort(rocker_port);
+ return 0;
+ default:
+ break;
+ }
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = NTF_SELF;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = rocker_port->dev->ifindex;
- ndm->ndm_state = NUD_REACHABLE;
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_STP_STATE:
+ err = rocker_port_stp_update(rocker_port, attr->trans,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+ err = rocker_port_brport_flags_set(rocker_port, attr->trans,
+ attr->u.brport_flags);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
- goto nla_put_failure;
+ return err;
+}
- if (vid && nla_put_u16(skb, NDA_VLAN, vid))
- goto nla_put_failure;
+static int rocker_port_vlan_add(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, u16 vid, u16 flags)
+{
+ int err;
- nlmsg_end(skb, nlh);
- return 0;
+ /* XXX deal with flags for PVID and untagged */
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
+ err = rocker_port_vlan(rocker_port, trans, 0, vid);
+ if (err)
+ return err;
+
+ err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
+ if (err)
+ rocker_port_vlan(rocker_port, trans,
+ ROCKER_OP_FLAG_REMOVE, vid);
+
+ return err;
}
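
rocker_port_vlan_add() replaces the old .ndo_vlan_rx_add_vid hook and adds unwinding the hook lacked: if installing the router MAC entries fails, the VLAN that was just programmed is removed again so the two stay in sync. A sketch of the pattern:

	#include <stdio.h>

	static int vlan_install(int vid)	{ return 0; }
	static int vlan_remove(int vid)		{ return 0; }
	static int router_mac_install(int vid)	{ return -1; /* simulate failure */ }

	static int vlan_add(int vid)
	{
		int err = vlan_install(vid);

		if (err)
			return err;

		err = router_mac_install(vid);
		if (err)
			vlan_remove(vid);	/* unwind the first step */
		return err;
	}

	int main(void)
	{
		printf("vlan_add: %d\n", vlan_add(5));
		return 0;
	}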
-static int rocker_port_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev,
- int idx)
+static int rocker_port_vlans_add(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ const struct switchdev_obj_vlan *vlan)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_fdb_tbl_entry *found;
- struct hlist_node *tmp;
- int bkt;
- unsigned long lock_flags;
- const unsigned char *addr;
u16 vid;
int err;
- spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
- hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.pport != rocker_port->pport)
- continue;
- if (idx < cb->args[0])
- goto skip;
- addr = found->key.addr;
- vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
- err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH, NLM_F_MULTI);
- if (err < 0)
- break;
-skip:
- ++idx;
+ for (vid = vlan->vid_start; vid <= vlan->vid_end; vid++) {
+ err = rocker_port_vlan_add(rocker_port, trans,
+ vid, vlan->flags);
+ if (err)
+ return err;
}
- spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
- return idx;
+
+ return 0;
}
-static int rocker_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags)
+static int rocker_port_fdb_add(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ const struct switchdev_obj_fdb *fdb)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct nlattr *protinfo;
- struct nlattr *attr;
- int err;
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
+ int flags = 0;
- protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
- IFLA_PROTINFO);
- if (protinfo) {
- attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
- if (attr) {
- if (nla_len(attr) < sizeof(u8))
- return -EINVAL;
-
- if (nla_get_u8(attr))
- rocker_port->brport_flags |= BR_LEARNING;
- else
- rocker_port->brport_flags &= ~BR_LEARNING;
- err = rocker_port_set_learning(rocker_port);
- if (err)
- return err;
- }
- attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
- if (attr) {
- if (nla_len(attr) < sizeof(u8))
- return -EINVAL;
-
- if (nla_get_u8(attr))
- rocker_port->brport_flags |= BR_LEARNING_SYNC;
- else
- rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
- }
- }
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
- return 0;
+ return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}
-static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev,
- u32 filter_mask, int nlflags)
+static int rocker_port_obj_add(struct net_device *dev,
+ struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- u16 mode = BRIDGE_MODE_UNDEF;
- u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+ const struct switchdev_obj_ipv4_fib *fib4;
+ int err = 0;
+
+ switch (obj->trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ BUG_ON(!list_empty(&rocker_port->trans_mem));
+ break;
+ case SWITCHDEV_TRANS_ABORT:
+ rocker_port_trans_abort(rocker_port);
+ return 0;
+ default:
+ break;
+ }
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = rocker_port_vlans_add(rocker_port, obj->trans,
+ &obj->u.vlan);
+ break;
+ case SWITCHDEV_OBJ_IPV4_FIB:
+ fib4 = &obj->u.ipv4_fib;
+ err = rocker_port_fib_ipv4(rocker_port, obj->trans,
+ htonl(fib4->dst), fib4->dst_len,
+ fib4->fi, fib4->tb_id, 0);
+ break;
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
- rocker_port->brport_flags, mask,
- nlflags);
+ return err;
}
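
rocker_port_obj_add() is the single entry point that replaces the per-object ndo/swdev hooks: it first handles the transaction bookkeeping, where PREPARE asserts the scratch list is empty and ABORT frees it and returns, then dispatches on the object id. A sketch of that dispatch shape (enum values are illustrative):

	#include <stdio.h>

	enum trans { TRANS_NONE, TRANS_PREPARE, TRANS_ABORT, TRANS_COMMIT };
	enum obj_id { OBJ_VLAN, OBJ_FIB4, OBJ_FDB };

	struct obj { enum trans trans; enum obj_id id; };

	static int obj_add(struct obj *obj)
	{
		switch (obj->trans) {
		case TRANS_PREPARE:
			/* scratch list must be empty before a new dry run */
			break;
		case TRANS_ABORT:
			/* free prepare-phase allocations, then bail out */
			return 0;
		default:
			break;
		}

		switch (obj->id) {	/* one handler per object type */
		case OBJ_VLAN:	return 0;
		case OBJ_FIB4:	return 0;
		case OBJ_FDB:	return 0;
		default:	return -1;	/* -EOPNOTSUPP */
		}
	}

	int main(void)
	{
		struct obj o = { TRANS_COMMIT, OBJ_VLAN };

		printf("obj_add: %d\n", obj_add(&o));
		return 0;
	}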
-static int rocker_port_get_phys_port_name(struct net_device *dev,
- char *buf, size_t len)
+static int rocker_port_vlan_del(struct rocker_port *rocker_port,
+ u16 vid, u16 flags)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct port_name name = { .buf = buf, .len = len };
int err;
- err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
- rocker_cmd_get_port_settings_prep, NULL,
- rocker_cmd_get_port_settings_phys_name_proc,
- &name, false);
+ err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE, htons(vid));
+ if (err)
+ return err;
- return err ? -EOPNOTSUPP : 0;
+ return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE, vid);
}
-static const struct net_device_ops rocker_port_netdev_ops = {
- .ndo_open = rocker_port_open,
- .ndo_stop = rocker_port_stop,
- .ndo_start_xmit = rocker_port_xmit,
- .ndo_set_mac_address = rocker_port_set_mac_address,
- .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
- .ndo_fdb_add = rocker_port_fdb_add,
- .ndo_fdb_del = rocker_port_fdb_del,
- .ndo_fdb_dump = rocker_port_fdb_dump,
- .ndo_bridge_setlink = rocker_port_bridge_setlink,
- .ndo_bridge_getlink = rocker_port_bridge_getlink,
- .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
-};
+static int rocker_port_vlans_del(struct rocker_port *rocker_port,
+ const struct switchdev_obj_vlan *vlan)
+{
+ u16 vid;
+ int err;
-/********************
- * swdev interface
- ********************/
+ for (vid = vlan->vid_start; vid <= vlan->vid_end; vid++) {
+ err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
-static int rocker_port_swdev_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid)
+static int rocker_port_fdb_del(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ const struct switchdev_obj_fdb *fdb)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct rocker *rocker = rocker_port->rocker;
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
+ int flags = ROCKER_OP_FLAG_REMOVE;
- psid->id_len = sizeof(rocker->hw.id);
- memcpy(&psid->id, &rocker->hw.id, psid->id_len);
- return 0;
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
+
+ return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}
-static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state)
+static int rocker_port_obj_del(struct net_device *dev,
+ struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct switchdev_obj_ipv4_fib *fib4;
+ int err = 0;
- return rocker_port_stp_update(rocker_port, state);
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
+ break;
+ case SWITCHDEV_OBJ_IPV4_FIB:
+ fib4 = &obj->u.ipv4_fib;
+ err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
+ htonl(fib4->dst), fib4->dst_len,
+ fib4->fi, fib4->tb_id,
+ ROCKER_OP_FLAG_REMOVE);
+ break;
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
}
-static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev,
- __be32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type,
- u32 nlflags, u32 tb_id)
+static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
+ struct switchdev_obj *obj)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int flags = 0;
+ struct rocker *rocker = rocker_port->rocker;
+ struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+ struct rocker_fdb_tbl_entry *found;
+ struct hlist_node *tmp;
+ unsigned long lock_flags;
+ int bkt;
+ int err = 0;
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+ if (found->key.pport != rocker_port->pport)
+ continue;
+ fdb->addr = found->key.addr;
+ fdb->vid = rocker_port_vlan_to_vid(rocker_port,
+ found->key.vlan_id);
+ err = obj->cb(rocker_port->dev, obj);
+ if (err)
+ break;
+ }
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
- return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
- fi, tb_id, flags);
+ return err;
}
-static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev,
- __be32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
+static int rocker_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int flags = ROCKER_OP_FLAG_REMOVE;
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
- return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
- fi, tb_id, flags);
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = rocker_port_fdb_dump(rocker_port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
}
-static const struct swdev_ops rocker_port_swdev_ops = {
- .swdev_parent_id_get = rocker_port_swdev_parent_id_get,
- .swdev_port_stp_update = rocker_port_swdev_port_stp_update,
- .swdev_fib_ipv4_add = rocker_port_swdev_fib_ipv4_add,
- .swdev_fib_ipv4_del = rocker_port_swdev_fib_ipv4_del,
+static const struct switchdev_ops rocker_port_switchdev_ops = {
+ .switchdev_port_attr_get = rocker_port_attr_get,
+ .switchdev_port_attr_set = rocker_port_attr_set,
+ .switchdev_port_obj_add = rocker_port_obj_add,
+ .switchdev_port_obj_del = rocker_port_obj_del,
+ .switchdev_port_obj_dump = rocker_port_obj_dump,
};
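
The net effect of this block: the driver-specific netlink plumbing (FDB fill/dump, bridge get/setlink) is deleted in favor of the generic switchdev_port_* helpers wired into rocker_port_netdev_ops earlier, and swdev_ops becomes switchdev_ops with attr/obj-based entry points. FDB dump, for example, shrinks to walking the table and invoking the caller-supplied obj->cb per entry instead of building netlink messages by hand. A sketch of that callback-driven dump (hypothetical types):

	#include <stdio.h>

	struct fdb_entry { const char *addr; int vid; };

	typedef int (*dump_cb)(const struct fdb_entry *e);

	static int fdb_dump(const struct fdb_entry *tbl, int n, dump_cb cb)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			err = cb(&tbl[i]);	/* core formats the entry */
			if (err)
				return err;	/* stop on first callback error */
		}
		return 0;
	}

	static int print_entry(const struct fdb_entry *e)
	{
		printf("%s vid %d\n", e->addr, e->vid);
		return 0;
	}

	int main(void)
	{
		struct fdb_entry tbl[] = {
			{ "52:54:00:12:35:01", 1 },
			{ "52:54:00:12:35:02", 10 },
		};

		return fdb_dump(tbl, 2, print_entry);
	}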
/********************
@@ -4334,8 +4554,7 @@ static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
}
static int
-rocker_cmd_get_port_stats_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -4359,14 +4578,13 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
- struct rocker_tlv *pattr;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
+ const struct rocker_tlv *pattr;
u32 pport;
u64 *data = priv;
int i;
@@ -4400,10 +4618,10 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
void *priv)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_stats_prep, NULL,
rocker_cmd_get_port_stats_ethtool_proc,
- priv, false);
+ priv);
}
static void rocker_port_get_stats(struct net_device *dev,
@@ -4417,8 +4635,6 @@ static void rocker_port_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
data[i] = 0;
}
-
- return;
}
static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
@@ -4453,8 +4669,8 @@ static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_desc_info *desc_info;
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -4472,8 +4688,9 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
if (err == 0) {
rocker_port->dev->stats.tx_packets++;
rocker_port->dev->stats.tx_bytes += skb->len;
- } else
+ } else {
rocker_port->dev->stats.tx_errors++;
+ }
dev_kfree_skb_any(skb);
credits++;
@@ -4488,11 +4705,11 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
-static int rocker_port_rx_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_port_rx_proc(const struct rocker *rocker,
+ const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
size_t rx_len;
@@ -4514,7 +4731,7 @@ static int rocker_port_rx_proc(struct rocker *rocker,
netif_receive_skb(skb);
- return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
+ return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
@@ -4525,7 +4742,7 @@ static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -4565,9 +4782,9 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
* PCI driver ops
*****************/
-static void rocker_carrier_init(struct rocker_port *rocker_port)
+static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
bool link_up;
@@ -4578,23 +4795,24 @@ static void rocker_carrier_init(struct rocker_port *rocker_port)
netif_carrier_off(rocker_port->dev);
}
-static void rocker_remove_ports(struct rocker *rocker)
+static void rocker_remove_ports(const struct rocker *rocker)
{
struct rocker_port *rocker_port;
int i;
for (i = 0; i < rocker->port_count; i++) {
rocker_port = rocker->ports[i];
- rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
+ rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
}
kfree(rocker->ports);
}
-static void rocker_port_dev_addr_init(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct pci_dev *pdev = rocker->pdev;
int err;
err = rocker_cmd_get_port_settings_macaddr(rocker_port,
@@ -4607,9 +4825,10 @@ static void rocker_port_dev_addr_init(struct rocker *rocker,
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
struct rocker_port *rocker_port;
struct net_device *dev;
+ u16 untagged_vid = 0;
int err;
dev = alloc_etherdev(sizeof(struct rocker_port));
@@ -4621,20 +4840,19 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port->port_number = port_number;
rocker_port->pport = port_number + 1;
rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+ INIT_LIST_HEAD(&rocker_port->trans_mem);
- rocker_port_dev_addr_init(rocker, rocker_port);
+ rocker_port_dev_addr_init(rocker_port);
dev->netdev_ops = &rocker_port_netdev_ops;
dev->ethtool_ops = &rocker_port_ethtool_ops;
- dev->swdev_ops = &rocker_port_swdev_ops;
+ dev->switchdev_ops = &rocker_port_switchdev_ops;
netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
NAPI_POLL_WEIGHT);
netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
NAPI_POLL_WEIGHT);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_SWITCH_OFFLOAD;
+ dev->features |= NETIF_F_NETNS_LOCAL;
err = register_netdev(dev);
if (err) {
@@ -4643,18 +4861,29 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
}
rocker->ports[port_number] = rocker_port;
- rocker_port_set_learning(rocker_port);
+ rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
- err = rocker_port_ig_tbl(rocker_port, 0);
+ err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
if (err) {
dev_err(&pdev->dev, "install ig port table failed\n");
goto err_port_ig_tbl;
}
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
+
+ err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
+ if (err) {
+ netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
+ goto err_untagged_vlan;
+ }
+
return 0;
+err_untagged_vlan:
+ rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
unregister_netdev(dev);
err_register_netdev:
@@ -4669,7 +4898,7 @@ static int rocker_probe_ports(struct rocker *rocker)
int err;
alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
- rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
if (!rocker->ports)
return -ENOMEM;
for (i = 0; i < rocker->port_count; i++) {
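Note on the kmalloc()-to-kzalloc() switch above: rocker_remove_ports() walks the whole ports[] array, so any slot that was never populated (because a later per-port probe failed) must read back as NULL for cleanup to stay safe. A minimal userspace model of that invariant, using calloc() the way the kernel code uses kzalloc() (kcalloc() would be the other idiomatic in-kernel spelling):

#include <stdlib.h>

struct port { int id; };

int main(void)
{
	size_t count = 4, i;
	/* zeroed allocation: never-populated slots read as NULL */
	struct port **ports = calloc(count, sizeof(*ports));

	if (!ports)
		return 1;
	ports[0] = malloc(sizeof(*ports[0]));	/* pretend probes 1..3 failed */
	for (i = 0; i < count; i++) {		/* cleanup walks every slot */
		if (!ports[i])
			continue;		/* safe only because of zeroing */
		free(ports[i]);
	}
	free(ports);
	return 0;
}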
@@ -4718,7 +4947,7 @@ err_enable_msix:
return err;
}
-static void rocker_msix_fini(struct rocker *rocker)
+static void rocker_msix_fini(const struct rocker *rocker)
{
pci_disable_msix(rocker->pdev);
kfree(rocker->msix_entries);
@@ -4884,7 +5113,7 @@ static struct pci_driver rocker_pci_driver = {
* Net device notifier event handler
************************************/
-static bool rocker_port_dev_check(struct net_device *dev)
+static bool rocker_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &rocker_port_netdev_ops;
}
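rocker_port_dev_check() identifies a rocker port purely by comparing the netdev's ops pointer against the driver's own table; no private flag or lookup structure is needed. A standalone model of that ownership test:

#include <stdio.h>

struct ops { int dummy; };
struct dev { const struct ops *ops; };

static const struct ops my_ops;

/* a device belongs to this driver iff it uses this driver's ops table */
static int is_mine(const struct dev *d)
{
	return d->ops == &my_ops;
}

int main(void)
{
	struct dev mine = { &my_ops }, other = { NULL };

	printf("%d %d\n", is_mine(&mine), is_mine(&other));
	return 0;
}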
@@ -4892,45 +5121,54 @@ static bool rocker_port_dev_check(struct net_device *dev)
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
struct net_device *bridge)
{
+ u16 untagged_vid = 0;
int err;
+ /* Port is joining bridge, so the internal VLAN for the
+ * port is going to change to the bridge internal VLAN.
+ * Let's remove untagged VLAN (vid=0) from port and
+ * re-add once internal VLAN has changed.
+ */
+
+ err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
+ if (err)
+ return err;
+
rocker_port_internal_vlan_id_put(rocker_port,
rocker_port->dev->ifindex);
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
rocker_port->bridge_dev = bridge;
- /* Use bridge internal VLAN ID for untagged pkts */
- err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
- if (err)
- return err;
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port,
- bridge->ifindex);
- return rocker_port_vlan(rocker_port, 0, 0);
+ return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
+ u16 untagged_vid = 0;
int err;
- rocker_port_internal_vlan_id_put(rocker_port,
- rocker_port->bridge_dev->ifindex);
-
- rocker_port->bridge_dev = NULL;
-
- /* Use port internal VLAN ID for untagged pkts */
- err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+ err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
if (err)
return err;
+
+ rocker_port_internal_vlan_id_put(rocker_port,
+ rocker_port->bridge_dev->ifindex);
rocker_port->internal_vlan_id =
rocker_port_internal_vlan_id_get(rocker_port,
rocker_port->dev->ifindex);
- err = rocker_port_vlan(rocker_port, 0, 0);
+
+ rocker_port->bridge_dev = NULL;
+
+ err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
if (err)
return err;
if (rocker_port->dev->flags & IFF_UP)
- err = rocker_port_fwd_enable(rocker_port);
+ err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);
return err;
}
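Both bridge paths now follow the same shape: delete the untagged VID 0 entry while the old internal VLAN is still current, release and re-acquire the internal VLAN id for the new ifindex, then re-add VID 0 under it. A hypothetical helper (not part of this patch) sketching that shared shape, using the driver's own functions as shown above; the bridge_dev assignment would stay at the call sites:

static int rocker_port_swap_internal_vlan(struct rocker_port *rocker_port,
					  int old_ifindex, int new_ifindex)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port, old_ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, new_ifindex);

	return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
				    untagged_vid, 0);
}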
@@ -4992,7 +5230,8 @@ static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
__be32 ip_addr = *(__be32 *)n->primary_key;
- return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
+ return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+ flags, ip_addr, n->ha);
}
static int rocker_netevent_event(struct notifier_block *unused,
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index a4e9591d7457..c61fbf968036 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -65,9 +65,9 @@ enum {
#define ROCKER_TEST_DMA_CTRL 0x0034
/* Rocker test register ctrl */
-#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0)
-#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1)
-#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2)
+#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0)
+#define ROCKER_TEST_DMA_CTRL_FILL BIT(1)
+#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2)
/* Rocker DMA ring register offsets */
#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */
@@ -79,7 +79,7 @@ enum {
#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32)
/* Rocker dma ctrl register bits */
-#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0)
+#define ROCKER_DMA_DESC_CTRL_RESET BIT(0)
/* Rocker DMA ring types */
enum rocker_dma_type {
@@ -111,7 +111,7 @@ struct rocker_desc {
u16 comp_err;
};
-#define ROCKER_DMA_DESC_COMP_ERR_GEN (1 << 15)
+#define ROCKER_DMA_DESC_COMP_ERR_GEN BIT(15)
/* Rocker DMA TLV struct */
struct rocker_tlv {
@@ -237,14 +237,14 @@ enum {
ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
};
-#define ROCKER_RX_FLAGS_IPV4 (1 << 0)
-#define ROCKER_RX_FLAGS_IPV6 (1 << 1)
-#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2)
-#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3)
-#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4)
-#define ROCKER_RX_FLAGS_TCP (1 << 5)
-#define ROCKER_RX_FLAGS_UDP (1 << 6)
-#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7)
+#define ROCKER_RX_FLAGS_IPV4 BIT(0)
+#define ROCKER_RX_FLAGS_IPV6 BIT(1)
+#define ROCKER_RX_FLAGS_CSUM_CALC BIT(2)
+#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3)
+#define ROCKER_RX_FLAGS_IP_FRAG BIT(4)
+#define ROCKER_RX_FLAGS_TCP BIT(5)
+#define ROCKER_RX_FLAGS_UDP BIT(6)
+#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7)
enum {
ROCKER_TLV_TX_UNSPEC,
@@ -460,6 +460,6 @@ enum rocker_of_dpa_overlay_type {
#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */
/* Rocker control bits */
-#define ROCKER_CONTROL_RESET (1 << 0)
+#define ROCKER_CONTROL_RESET BIT(0)
#endif
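The register-bit conversions above rely on the kernel's BIT() macro, which expands to an unsigned-long shift; the change is cosmetic for these values but avoids signed-shift surprises near bit 31. A standalone equivalence check, assuming the usual definition from linux/bits.h:

#include <assert.h>

#define BIT(nr) (1UL << (nr))			/* as in linux/bits.h */
#define ROCKER_DMA_DESC_COMP_ERR_GEN	BIT(15)

int main(void)
{
	assert(ROCKER_DMA_DESC_COMP_ERR_GEN == (1 << 15));
	assert(BIT(31) == 0x80000000UL);	/* unsigned, no UB at bit 31 */
	return 0;
}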
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 088921294448..4dd92b7b80f4 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -36,3 +36,12 @@ config SFC_SRIOV
This enables support for the SFC9000 I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
+config SFC_MCDI_LOGGING
+ bool "Solarflare SFC9000/SFC9100-family MCDI logging support"
+ depends on SFC
+ default y
+ ---help---
+ This enables support for tracing of MCDI (Management-Controller-to-
+ Driver-Interface) commands and responses, allowing debugging of
+ driver/firmware interaction. The tracing is actually enabled by
+ a sysfs file 'mcdi_logging' under the PCI device.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3a83c0dca8e6..ce8470fe79d5 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -3,6 +3,6 @@ sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_port.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
-sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
+sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index fbb6cfa0f5f1..847643455468 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -15,6 +15,7 @@
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
+#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
@@ -30,6 +31,9 @@ enum {
/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+/* The maximum size of a shared RSS context */
+/* TODO: this should really be from the mcdi protocol export */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
/* The filter table(s) are managed by firmware and we have write-only
* access. When removing filters we must identify them to the
@@ -77,7 +81,6 @@ struct efx_ef10_filter_table {
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
@@ -92,8 +95,49 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
- return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+ int bar;
+
+ bar = efx->type->mem_bar;
+ return resource_size(&efx->pci_dev->resource[bar]);
+}
+
+static int efx_ef10_get_pf_index(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+ return 0;
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_get_vf_index(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
+ return 0;
}
+#endif
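efx_ef10_get_pf_index() and efx_ef10_get_vf_index() issue the same MC_CMD_GET_FUNCTION_INFO request and differ only in which output dword they keep. A hypothetical consolidation (not in the patch, using only the MCDI helpers visible above) would be:

static int efx_ef10_get_function_info(struct efx_nic *efx,
				      unsigned int *pf, unsigned int *vf)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	if (pf)
		*pf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	if (vf)
		*vf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}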
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
@@ -117,6 +161,13 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ /* record the DPCPU firmware IDs to determine VEB vswitching support.
+ */
+ nic_data->rx_dpcpu_fw_id =
+ MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+ nic_data->tx_dpcpu_fw_id =
+ MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
if (!(nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
netif_err(efx, drv, efx->net_dev,
@@ -147,7 +198,7 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
return rc > 0 ? rc : -ERANGE;
}
-static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
size_t outlen;
@@ -167,9 +218,66 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
return 0;
}
+static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
+ size_t outlen;
+ int num_addrs, rc;
+
+ MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
+ return -EIO;
+
+ num_addrs = MCDI_DWORD(outbuf,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+
+ WARN_ON(num_addrs != 1);
+
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
+
+ return 0;
+}
+
+static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ ? 1 : 0);
+}
+
+static ssize_t efx_ef10_show_primary_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ ? 1 : 0);
+}
+
+static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
+ NULL);
+static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
+
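The two show routines above report single bits of efx->mcdi->fn_flags through read-only sysfs attributes. On kernels providing DEVICE_ATTR_RO() the same attribute could be spelled more compactly (a sketch, assuming the <name>_show naming convention that macro requires):

static ssize_t link_control_flag_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       !!(efx->mcdi->fn_flags &
			  (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)));
}
static DEVICE_ATTR_RO(link_control_flag);	/* implies mode 0444 */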
static int efx_ef10_probe(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data;
+ struct net_device *net_dev = efx->net_dev;
int i, rc;
/* We can have one VI for each 8K region. However, until we
@@ -178,7 +286,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->max_channels =
min_t(unsigned int,
EFX_MAX_CHANNELS,
- resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+ efx_ef10_mem_map_size(efx) /
(EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
if (WARN_ON(efx->max_channels == 0))
return -EIO;
@@ -188,6 +296,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
+ /* we assume later that we can copy from this buffer in dwords */
+ BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
if (rc)
@@ -209,6 +320,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
/* In case we're recovering from a crash (kexec), we want to
* cancel any outstanding request by the previous user of this
* function. We send a special message using the least
@@ -230,45 +343,85 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
+ rc = device_create_file(&efx->pci_dev->dev,
+ &dev_attr_link_control_flag);
+ if (rc)
+ goto fail3;
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ if (rc)
+ goto fail4;
+
+ rc = efx_ef10_get_pf_index(efx);
+ if (rc)
+ goto fail5;
+
rc = efx_ef10_init_datapath_caps(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->port_num = rc;
+ net_dev->dev_port = rc;
- rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+ rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
if (rc)
- goto fail3;
+ goto fail5;
rc = efx_ef10_get_sysclk_freq(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
- /* Check whether firmware supports bug 35388 workaround */
+ /* Check whether firmware supports bug 35388 workaround.
+ * First try to enable it, then if we get EPERM, just
+ * ask if it's already enabled
+ */
rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
- if (rc == 0)
+ if (rc == 0) {
nic_data->workaround_35388 = true;
- else if (rc != -ENOSYS && rc != -ENOENT)
- goto fail3;
+ } else if (rc == -EPERM) {
+ unsigned int enabled;
+
+ rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
+ if (rc)
+ goto fail3;
+ nic_data->workaround_35388 = enabled &
+ MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
+ } else if (rc != -ENOSYS && rc != -ENOENT) {
+ goto fail5;
+ }
netif_dbg(efx, probe, efx->net_dev,
"workaround for bug 35388 is %sabled\n",
nic_data->workaround_35388 ? "en" : "dis");
rc = efx_mcdi_mon_probe(efx);
- if (rc)
- goto fail3;
+ if (rc && rc != -EPERM)
+ goto fail5;
efx_ptp_probe(efx, NULL);
+#ifdef CONFIG_SFC_SRIOV
+ if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
+ } else
+#endif
+ ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+
return 0;
+fail5:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+fail4:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
efx_mcdi_fini(efx);
fail2:
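The relabelling from fail3 to fail5 keeps the probe routine's unwind invariant intact: each failN label undoes exactly the steps that had succeeded before jump site N, in reverse order of setup. A standalone model of the pattern:

#include <stdio.h>

static int step(int n, int fail_at)	{ return n == fail_at ? -1 : 0; }
static void undo(int n)			{ printf("undo step %d\n", n); }

static int probe(int fail_at)
{
	if (step(1, fail_at))
		goto fail1;
	if (step(2, fail_at))
		goto fail2;
	if (step(3, fail_at))
		goto fail3;
	return 0;

fail3:						/* unwind in reverse order */
	undo(2);
fail2:
	undo(1);
fail1:
	return -1;
}

int main(void)
{
	return probe(3) == -1 ? 0 : 1;		/* prints "undo step 2/1" */
}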
@@ -281,7 +434,7 @@ fail1:
static int efx_ef10_free_vis(struct efx_nic *efx)
{
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
size_t outlen;
int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
@@ -352,9 +505,9 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- MCDI_DECLARE_BUF(inbuf,
- max(MC_CMD_LINK_PIOBUF_IN_LEN,
- MC_CMD_UNLINK_PIOBUF_IN_LEN));
+ _MCDI_DECLARE_BUF(inbuf,
+ max(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_IN_LEN));
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
unsigned int offset, index;
@@ -363,6 +516,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+ memset(inbuf, 0, sizeof(inbuf));
+
/* Link a buffer to each VI in the write-combining mapping */
for (index = 0; index < nic_data->n_piobufs; ++index) {
MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
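The switch to _MCDI_DECLARE_BUF() plus an explicit memset() suggests the underscore variant declares the buffer without the zero-initializer that MCDI_DECLARE_BUF() attaches, so clearing it by hand keeps unwritten request fields deterministic. A standalone model, with the macro bodies assumed from the kernel's mcdi.h:

#include <string.h>

typedef struct { unsigned char b[4]; } efx_dword_t;
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
/* assumed: bare declaration, no initializer */
#define _MCDI_DECLARE_BUF(name, len)	efx_dword_t name[DIV_ROUND_UP(len, 4)]
/* assumed: same declaration plus a zeroing initializer */
#define MCDI_DECLARE_BUF(name, len)	_MCDI_DECLARE_BUF(name, len) = {{{0}}}

int main(void)
{
	MCDI_DECLARE_BUF(a, 12);	/* zeroed by the initializer */
	_MCDI_DECLARE_BUF(b, 12);	/* indeterminate contents ... */

	memset(b, 0, sizeof(b));	/* ... so zero it explicitly */
	return a[0].b[0] | b[0].b[0];
}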
@@ -475,6 +630,25 @@ static void efx_ef10_remove(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_ef10_nic_data *nic_data_pf;
+ struct pci_dev *pci_dev_pf;
+ struct efx_nic *efx_pf;
+ struct ef10_vf *vf;
+
+ if (efx->pci_dev->is_virtfn) {
+ pci_dev_pf = efx->pci_dev->physfn;
+ if (pci_dev_pf) {
+ efx_pf = pci_get_drvdata(pci_dev_pf);
+ nic_data_pf = efx_pf->nic_data;
+ vf = nic_data_pf->vf + nic_data->vf_index;
+ vf->efx = NULL;
+ } else
+ netif_info(efx, drv, efx->net_dev,
+ "Could not get the PF id from VF\n");
+ }
+#endif
+
efx_ptp_remove(efx);
efx_mcdi_mon_remove(efx);
@@ -490,11 +664,78 @@ static void efx_ef10_remove(struct efx_nic *efx)
if (!nic_data->must_restore_piobufs)
efx_ef10_free_piobufs(efx);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
+
efx_mcdi_fini(efx);
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
kfree(nic_data);
}
+static int efx_ef10_probe_pf(struct efx_nic *efx)
+{
+ return efx_ef10_probe(efx);
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_probe_vf(struct efx_nic *efx)
+{
+ int rc;
+ struct pci_dev *pci_dev_pf;
+
+ /* If the parent PF has no VF data structure, it doesn't know about this
+ * VF, so fail the probe; the VF needs to be re-created. This can happen
+ * if the PF driver is unloaded while the VF is assigned to a guest.
+ */
+ pci_dev_pf = efx->pci_dev->physfn;
+ if (pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
+
+ if (!nic_data_pf->vf) {
+ netif_info(efx, drv, efx->net_dev,
+ "The VF cannot link to its parent PF; "
+ "please destroy and re-create the VF\n");
+ return -EBUSY;
+ }
+ }
+
+ rc = efx_ef10_probe(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ef10_get_vf_index(efx);
+ if (rc)
+ goto fail;
+
+ if (efx->pci_dev->is_virtfn) {
+ if (efx->pci_dev->physfn) {
+ struct efx_nic *efx_pf =
+ pci_get_drvdata(efx->pci_dev->physfn);
+ struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ nic_data_p->vf[nic_data->vf_index].efx = efx;
+ nic_data_p->vf[nic_data->vf_index].pci_dev =
+ efx->pci_dev;
+ } else
+ netif_info(efx, drv, efx->net_dev,
+ "Could not get the PF id from VF\n");
+ }
+
+ return 0;
+
+fail:
+ efx_ef10_remove(efx);
+ return rc;
+}
+#else
+static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
+{
+ return 0;
+}
+#endif
+
static int efx_ef10_alloc_vis(struct efx_nic *efx,
unsigned int min_vis, unsigned int max_vis)
{
@@ -687,7 +928,9 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
- efx_ef10_rx_push_rss_config(efx);
+ /* don't fail init if RSS setup doesn't work */
+ efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+
return 0;
}
@@ -702,6 +945,14 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
+static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
+{
+ if (reason == RESET_TYPE_MC_FAILURE)
+ return RESET_TYPE_DATAPATH;
+
+ return efx_mcdi_map_reset_reason(reason);
+}
+
static int efx_ef10_map_reset_flags(u32 *flags)
{
enum {
@@ -760,93 +1011,112 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
- EF10_DMA_STAT(tx_bytes, TX_BYTES),
- EF10_DMA_STAT(tx_packets, TX_PKTS),
- EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
- EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
- EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
- EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
- EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
- EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
- EF10_DMA_STAT(tx_64, TX_64_PKTS),
- EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
- EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
- EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
- EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
- EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_bytes, RX_BYTES),
- EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
- EF10_OTHER_STAT(rx_good_bytes),
- EF10_OTHER_STAT(rx_bad_bytes),
- EF10_DMA_STAT(rx_packets, RX_PKTS),
- EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
- EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
- EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
- EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
- EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
- EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
- EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
- EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
- EF10_DMA_STAT(rx_64, RX_64_PKTS),
- EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
- EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
- EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
- EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
- EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
- EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
- EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
- EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
- EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
- EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+ EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(port_tx_packets, TX_PKTS),
+ EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(port_rx_good_bytes),
+ EF10_OTHER_STAT(port_rx_bad_bytes),
+ EF10_DMA_STAT(port_rx_packets, RX_PKTS),
+ EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
GENERIC_SW_STAT(rx_nodesc_trunc),
GENERIC_SW_STAT(rx_noskb_drops),
- EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
- EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
- EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
- EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
- EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
- EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
- EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
- EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+ EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+ EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+ EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
+ EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
+ EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
+ EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
+ EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
+ EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
+ EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
+ EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
+ EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
+ EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
+ EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
+ EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
+ EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
+ EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
+ EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
+ EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
};
-#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
- (1ULL << EF10_STAT_tx_packets) | \
- (1ULL << EF10_STAT_tx_pause) | \
- (1ULL << EF10_STAT_tx_unicast) | \
- (1ULL << EF10_STAT_tx_multicast) | \
- (1ULL << EF10_STAT_tx_broadcast) | \
- (1ULL << EF10_STAT_rx_bytes) | \
- (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
- (1ULL << EF10_STAT_rx_good_bytes) | \
- (1ULL << EF10_STAT_rx_bad_bytes) | \
- (1ULL << EF10_STAT_rx_packets) | \
- (1ULL << EF10_STAT_rx_good) | \
- (1ULL << EF10_STAT_rx_bad) | \
- (1ULL << EF10_STAT_rx_pause) | \
- (1ULL << EF10_STAT_rx_control) | \
- (1ULL << EF10_STAT_rx_unicast) | \
- (1ULL << EF10_STAT_rx_multicast) | \
- (1ULL << EF10_STAT_rx_broadcast) | \
- (1ULL << EF10_STAT_rx_lt64) | \
- (1ULL << EF10_STAT_rx_64) | \
- (1ULL << EF10_STAT_rx_65_to_127) | \
- (1ULL << EF10_STAT_rx_128_to_255) | \
- (1ULL << EF10_STAT_rx_256_to_511) | \
- (1ULL << EF10_STAT_rx_512_to_1023) | \
- (1ULL << EF10_STAT_rx_1024_to_15xx) | \
- (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
- (1ULL << EF10_STAT_rx_gtjumbo) | \
- (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
- (1ULL << EF10_STAT_rx_overflow) | \
- (1ULL << EF10_STAT_rx_nodesc_drops) | \
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
+ (1ULL << EF10_STAT_port_tx_packets) | \
+ (1ULL << EF10_STAT_port_tx_pause) | \
+ (1ULL << EF10_STAT_port_tx_unicast) | \
+ (1ULL << EF10_STAT_port_tx_multicast) | \
+ (1ULL << EF10_STAT_port_tx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_bytes) | \
+ (1ULL << \
+ EF10_STAT_port_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_port_rx_packets) | \
+ (1ULL << EF10_STAT_port_rx_good) | \
+ (1ULL << EF10_STAT_port_rx_bad) | \
+ (1ULL << EF10_STAT_port_rx_pause) | \
+ (1ULL << EF10_STAT_port_rx_control) | \
+ (1ULL << EF10_STAT_port_rx_unicast) | \
+ (1ULL << EF10_STAT_port_rx_multicast) | \
+ (1ULL << EF10_STAT_port_rx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_lt64) | \
+ (1ULL << EF10_STAT_port_rx_64) | \
+ (1ULL << EF10_STAT_port_rx_65_to_127) | \
+ (1ULL << EF10_STAT_port_rx_128_to_255) | \
+ (1ULL << EF10_STAT_port_rx_256_to_511) | \
+ (1ULL << EF10_STAT_port_rx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
+ (1ULL << EF10_STAT_port_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
+ (1ULL << EF10_STAT_port_rx_overflow) | \
+ (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
(1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
(1ULL << GENERIC_STAT_rx_noskb_drops))
@@ -854,39 +1124,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
* switchable port we do not expose these because they might not
* include all the packets they should.
*/
-#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
- (1ULL << EF10_STAT_tx_lt64) | \
- (1ULL << EF10_STAT_tx_64) | \
- (1ULL << EF10_STAT_tx_65_to_127) | \
- (1ULL << EF10_STAT_tx_128_to_255) | \
- (1ULL << EF10_STAT_tx_256_to_511) | \
- (1ULL << EF10_STAT_tx_512_to_1023) | \
- (1ULL << EF10_STAT_tx_1024_to_15xx) | \
- (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
+ (1ULL << EF10_STAT_port_tx_lt64) | \
+ (1ULL << EF10_STAT_port_tx_64) | \
+ (1ULL << EF10_STAT_port_tx_65_to_127) |\
+ (1ULL << EF10_STAT_port_tx_128_to_255) |\
+ (1ULL << EF10_STAT_port_tx_256_to_511) |\
+ (1ULL << EF10_STAT_port_tx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
/* These statistics are only provided by the 40G MAC. For a 10G/40G
* switchable port we do expose these because the errors will otherwise
* be silent.
*/
-#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
- (1ULL << EF10_STAT_rx_length_error))
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
+ (1ULL << EF10_STAT_port_rx_length_error))
/* These statistics are only provided if the firmware supports the
* capability PM_AND_RXDP_COUNTERS.
*/
#define HUNT_PM_AND_RXDP_STAT_MASK ( \
- (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
- (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
- (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
- (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
- (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
- (1ULL << EF10_STAT_rx_dp_hlb_wait))
+ (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
+ (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
@@ -894,6 +1164,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
u32 port_caps = efx_mcdi_phy_get_caps(efx);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ if (!(efx->mcdi->fn_flags &
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ return 0;
+
if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
else
@@ -908,13 +1182,28 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
- u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 raw_mask[2];
+
+ raw_mask[0] = efx_ef10_raw_stat_mask(efx);
+
+ /* Only show vadaptor stats when EVB capability is present */
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
+ raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
+ raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+ } else {
+ raw_mask[1] = 0;
+ }
#if BITS_PER_LONG == 64
- mask[0] = raw_mask;
+ mask[0] = raw_mask[0];
+ mask[1] = raw_mask[1];
#else
- mask[0] = raw_mask & 0xffffffff;
- mask[1] = raw_mask >> 32;
+ mask[0] = raw_mask[0] & 0xffffffff;
+ mask[1] = raw_mask[0] >> 32;
+ mask[2] = raw_mask[1] & 0xffffffff;
+ mask[3] = raw_mask[1] >> 32;
#endif
}
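With the vadaptor stats pushed past bit 63, the stat mask no longer fits one u64, so the function now flattens two raw 64-bit words into the unsigned-long words of a DECLARE_BITMAP() according to BITS_PER_LONG. A standalone model of that split:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw_mask[2] = { 0xf0f0f0f0f0f0f0f0ULL, 0x5ULL };
	unsigned long mask[4] = { 0 };

#if ULONG_MAX > 0xffffffffUL			/* BITS_PER_LONG == 64 */
	mask[0] = raw_mask[0];
	mask[1] = raw_mask[1];
#else						/* BITS_PER_LONG == 32 */
	mask[0] = raw_mask[0] & 0xffffffff;
	mask[1] = raw_mask[0] >> 32;
	mask[2] = raw_mask[1] & 0xffffffff;
	mask[3] = raw_mask[1] >> 32;
#endif
	printf("%#lx %#lx\n", mask[0], mask[1]);
	return 0;
}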
@@ -927,7 +1216,51 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
mask, names);
}
-static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+
+ efx_ef10_get_stat_mask(efx, mask);
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
+ stats[EF10_STAT_rx_multicast] +
+ stats[EF10_STAT_rx_broadcast];
+ core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
+ stats[EF10_STAT_tx_multicast] +
+ stats[EF10_STAT_tx_broadcast];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
+ stats[EF10_STAT_rx_multicast_bytes] +
+ stats[EF10_STAT_rx_broadcast_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
+ stats[EF10_STAT_tx_multicast_bytes] +
+ stats[EF10_STAT_tx_broadcast_bytes];
+ core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = core_stats->rx_crc_errors;
+ core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+ }
+
+ return stats_count;
+}
+
+static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -952,67 +1285,114 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
- efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
- stats[EF10_STAT_rx_good_bytes] =
- stats[EF10_STAT_rx_bytes] -
- stats[EF10_STAT_rx_bytes_minus_good_bytes];
- efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
- stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[EF10_STAT_port_rx_nodesc_drops]);
+ stats[EF10_STAT_port_rx_good_bytes] =
+ stats[EF10_STAT_port_rx_bytes] -
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
efx_update_sw_stats(efx, stats);
return 0;
}
-static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
- struct rtnl_link_stats64 *core_stats)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
- DECLARE_BITMAP(mask, EF10_STAT_COUNT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- u64 *stats = nic_data->stats;
- size_t stats_count = 0, index;
int retry;
- efx_ef10_get_stat_mask(efx, mask);
-
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us)
*/
for (retry = 0; retry < 100; ++retry) {
- if (efx_ef10_try_update_nic_stats(efx) == 0)
+ if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
break;
udelay(100);
}
- if (full_stats) {
- for_each_set_bit(index, mask, EF10_STAT_COUNT) {
- if (efx_ef10_stat_desc[index].name) {
- *full_stats++ = stats[index];
- ++stats_count;
- }
- }
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
+
+static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+ struct efx_buffer stats_buf;
+ __le64 *dma_stats;
+ int rc;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ if (in_interrupt()) {
+ /* If in atomic context, we cannot update the stats. Just update the
+ * software stats and return so the caller can continue.
+ */
+ spin_lock_bh(&efx->stats_lock);
+ efx_update_sw_stats(efx, stats);
+ return 0;
}
- if (core_stats) {
- core_stats->rx_packets = stats[EF10_STAT_rx_packets];
- core_stats->tx_packets = stats[EF10_STAT_tx_packets];
- core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
- core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
- core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
- stats[GENERIC_STAT_rx_nodesc_trunc] +
- stats[GENERIC_STAT_rx_noskb_drops];
- core_stats->multicast = stats[EF10_STAT_rx_multicast];
- core_stats->rx_length_errors =
- stats[EF10_STAT_rx_gtjumbo] +
- stats[EF10_STAT_rx_length_error];
- core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
- core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
- core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
- core_stats->rx_errors = (core_stats->rx_length_errors +
- core_stats->rx_crc_errors +
- core_stats->rx_frame_errors);
+ efx_ef10_get_stat_mask(efx, mask);
+
+ rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+ if (rc) {
+ spin_lock_bh(&efx->stats_lock);
+ return rc;
}
- return stats_count;
+ dma_stats = stats_buf.addr;
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
+ MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, 1);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->stats_lock);
+ if (rc) {
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc != -ENOENT || atomic_read(&efx->active_queues))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
+ sizeof(inbuf), NULL, 0, rc);
+ goto out;
+ }
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
+ WARN_ON_ONCE(1);
+ goto out;
+ }
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+ stats, stats_buf.addr, false);
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ efx_update_sw_stats(efx, stats);
+out:
+ efx_nic_free_buffer(efx, &stats_buf);
+ return rc;
+}
+
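The VF stats path uses the classic generation-count handshake: read GENERATION_END, copy the DMA'd block, then re-read GENERATION_START; if the two differ, the MC was rewriting the block mid-copy and the caller retries with -EAGAIN. In the kernel version the rmb() calls order the reads. A userspace model of the check:

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define GEN_INVALID UINT64_MAX

struct dma_stats {
	uint64_t gen_start;	/* device writes this first */
	uint64_t vals[4];
	uint64_t gen_end;	/* device writes this last */
};

/* 0 on a consistent snapshot, -EAGAIN if the writer raced the copy */
static int snapshot(const struct dma_stats *dma, uint64_t *out)
{
	uint64_t end = dma->gen_end;

	if (end == GEN_INVALID)
		return -EAGAIN;		/* DMA has not completed yet */
	/* kernel: rmb() orders the generation read before the copy */
	memcpy(out, dma->vals, sizeof(dma->vals));
	/* kernel: rmb() again before re-reading the opening count */
	return dma->gen_start == end ? 0 : -EAGAIN;
}

int main(void)
{
	struct dma_stats d = { 7, { 1, 2, 3, 4 }, 7 };
	uint64_t out[4];

	return snapshot(&d, out);
}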
+static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ if (efx_ef10_try_update_nic_stats_vf(efx))
+ return 0;
+
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
@@ -1044,6 +1424,14 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
}
}
+static void efx_ef10_get_wol_vf(struct efx_nic *efx,
+ struct ethtool_wolinfo *wol) {}
+
+static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
+{
+ return -EOPNOTSUPP;
+}
+
static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
wol->supported = 0;
@@ -1123,13 +1511,17 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
efx_ef10_reset_mc_allocations(efx);
+ /* Driver-created vswitches and vports must be re-created */
+ nic_data->must_probe_vswitching = true;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
/* MAC statistics have been cleared on the NIC; clear the local
* statistic that we update with efx_update_diff_stat().
*/
- nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+ nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
return -EIO;
}
@@ -1232,16 +1624,17 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- size_t inlen, outlen;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t inlen;
dma_addr_t dma_addr;
efx_qword_t *txd;
int rc;
int i;
+ BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
@@ -1251,7 +1644,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
@@ -1266,7 +1659,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
+ NULL, 0, NULL);
if (rc)
goto fail;
@@ -1299,7 +1692,7 @@ fail:
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = tx_queue->efx;
size_t outlen;
int rc;
@@ -1378,19 +1771,33 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
-static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
+ bool exclusive, unsigned *context_size)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
+ u32 alloc_type = exclusive ?
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ unsigned rss_spread = exclusive ?
+ efx->rss_spread :
+ min(rounddown_pow_of_two(efx->rss_spread),
+ EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
+
+ if (!exclusive && rss_spread == 1) {
+ *context = EFX_EF10_RSS_CONTEXT_INVALID;
+ if (context_size)
+ *context_size = 1;
+ return 0;
+ }
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- EVB_PORT_ID_ASSIGNED);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
- EFX_MAX_CHANNELS);
+ nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -1402,6 +1809,9 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ if (context_size)
+ *context_size = rss_spread;
+
return 0;
}
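Context sizing above: an exclusive allocation asks for the full efx->rss_spread, while a shared one is clamped to the largest power of two not exceeding EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE (64). A standalone model of that rule, with rounddown_pow_of_two() open-coded:

#include <stdio.h>

#define MAX_SHARED_RSS	64UL

static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned long spread;

	for (spread = 1; spread <= 96; spread += 31) {
		unsigned long shared = rounddown_pow_of_two(spread);

		if (shared > MAX_SHARED_RSS)
			shared = MAX_SHARED_RSS;
		printf("rss_spread %2lu -> shared context size %lu\n",
		       spread, shared);
	}
	return 0;
}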
@@ -1418,7 +1828,8 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
WARN_ON(rc != 0);
}
-static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
+ const u32 *rx_indir_table)
{
MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
@@ -1432,7 +1843,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
MCDI_PTR(tablebuf,
RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
- (u8) efx->rx_indir_table[i];
+ (u8) rx_indir_table[i];
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
sizeof(tablebuf), NULL, 0, NULL);
@@ -1460,27 +1871,119 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
+static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
+ unsigned *context_size)
{
+ u32 new_rx_rss_context;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc;
+ int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+ false, context_size);
- netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
+ if (rc != 0)
+ return rc;
- if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
- if (rc != 0)
- goto fail;
+ nic_data->rx_rss_context = new_rx_rss_context;
+ nic_data->rx_rss_context_exclusive = false;
+ efx_set_default_rx_indir_table(efx);
+ return 0;
+}
+
+static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
+ const u32 *rx_indir_table)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+ u32 new_rx_rss_context;
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
+ !nic_data->rx_rss_context_exclusive) {
+ rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+ true, NULL);
+ if (rc == -EOPNOTSUPP)
+ return rc;
+ else if (rc != 0)
+ goto fail1;
+ } else {
+ new_rx_rss_context = nic_data->rx_rss_context;
}
- rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+ rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
+ rx_indir_table);
if (rc != 0)
- goto fail;
+ goto fail2;
- return;
+ if (nic_data->rx_rss_context != new_rx_rss_context)
+ efx_ef10_rx_free_indir_table(efx);
+ nic_data->rx_rss_context = new_rx_rss_context;
+ nic_data->rx_rss_context_exclusive = true;
+ if (rx_indir_table != efx->rx_indir_table)
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
+ return 0;
-fail:
+fail2:
+ if (new_rx_rss_context != nic_data->rx_rss_context)
+ efx_ef10_free_rss_context(efx, new_rx_rss_context);
+fail1:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
+{
+ int rc;
+
+ if (efx->rss_spread == 1)
+ return 0;
+
+ rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
+
+ if (rc == -ENOBUFS && !user) {
+ unsigned context_size;
+ bool mismatch = false;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
+ i++)
+ mismatch = rx_indir_table[i] !=
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+ rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
+ if (rc == 0) {
+ if (context_size != efx->rss_spread)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one of"
+ " different size."
+ " Wanted %u, got %u.\n",
+ efx->rss_spread, context_size);
+ else if (mismatch)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one but"
+ " could not apply custom"
+ " indirection.\n");
+ else
+ netif_info(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one.\n");
+ }
+ }
+ return rc;
+}
+
+static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)))
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (user)
+ return -EOPNOTSUPP;
+ if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+ return 0;
+ return efx_ef10_rx_push_shared_rss_config(efx, NULL);
}
static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
@@ -1496,14 +1999,15 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
MCDI_DECLARE_BUF(inbuf,
MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
- size_t inlen, outlen;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t inlen;
dma_addr_t dma_addr;
int rc;
int i;
+ BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
rx_queue->scatter_n = 0;
rx_queue->scatter_len = 0;
@@ -1517,7 +2021,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
@@ -1532,7 +2036,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
+ NULL, 0, NULL);
if (rc)
netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
efx_rx_queue_index(rx_queue));
@@ -1541,7 +2045,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = rx_queue->efx;
size_t outlen;
int rc;
@@ -1703,7 +2207,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
static void efx_ef10_ev_fini(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = channel->efx;
size_t outlen;
int rc;
@@ -2286,11 +2790,12 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
match_fields);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
@@ -3055,6 +3560,9 @@ fail:
return rc;
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3064,9 +3572,14 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
bool failed = false;
int rc;
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
if (!nic_data->must_restore_filters)
return;
+ if (!table)
+ return;
+
spin_lock_bh(&efx->filter_lock);
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
@@ -3102,6 +3615,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
nic_data->must_restore_filters = false;
}
+/* Caller must hold efx->filter_sem for write */
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3110,6 +3624,10 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
unsigned int filter_idx;
int rc;
+ efx->filter_state = NULL;
+ if (!table)
+ return;
+
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec)
@@ -3135,6 +3653,9 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3149,6 +3670,9 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
if (!efx_dev_registered(efx))
return;
+ if (!table)
+ return;
+
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
@@ -3280,6 +3804,78 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
WARN_ON(remove_failed);
}
+static int efx_ef10_set_mac_address(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ bool was_enabled = efx->port_enabled;
+ int rc;
+
+ efx_device_detach_sync(efx);
+ efx_net_stop(efx->net_dev);
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
+
+ ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
+ efx->net_dev->dev_addr);
+ MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+ nic_data->vport_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+
+ efx_ef10_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (was_enabled)
+ efx_net_open(efx->net_dev);
+ netif_device_attach(efx->net_dev);
+
+#if !defined(CONFIG_SFC_SRIOV)
+ if (rc == -EPERM)
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable mac-spoofing"
+ " on this interface\n");
+#else
+ if (rc == -EPERM) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+ /* Switch to PF and change MAC address on vport */
+ if (efx->pci_dev->is_virtfn && pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ if (!efx_ef10_sriov_set_vf_mac(efx_pf,
+ nic_data->vf_index,
+ efx->net_dev->dev_addr))
+ return 0;
+ }
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable mac-spoofing"
+ " on this interface\n");
+ } else if (efx->pci_dev->is_virtfn) {
+ /* Successfully changed by VF (with MAC spoofing), so update the
+ * parent PF if possible.
+ */
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+ if (pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
+ unsigned int i;
+
+ for (i = 0; i < efx_pf->vf_count; ++i) {
+ struct ef10_vf *vf = nic_data->vf + i;
+
+ if (vf->efx == efx) {
+ ether_addr_copy(vf->mac,
+ efx->net_dev->dev_addr);
+ return 0;
+ }
+ }
+ }
+ }
+#endif
+ return rc;
+}
+
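MC_CMD_VADAPTOR_SET_MAC must run with the datapath quiesced, so the function brackets the MCDI call: detach, stop, and drop the filter table on the way in; re-probe filters, reopen, and re-attach on the way out, before rc is even examined. The bracket reduced to its shape (a model, not driver code):

#include <stdio.h>

static void quiesce(void) { puts("detach + stop + remove filter table"); }
static void restore(void) { puts("re-probe filters + open + attach"); }
static int do_reconfig(void) { return -1; /* pretend the MCDI call failed */ }

int main(void)
{
	int rc;

	quiesce();
	rc = do_reconfig();
	restore();		/* runs on success and on failure alike */
	return rc ? 1 : 0;	/* rc is acted on only after restore() */
}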
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
efx_ef10_filter_sync_rx_mode(efx);
@@ -3287,6 +3883,13 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
return efx_mcdi_set_mac(efx);
}
+static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
+{
+ efx_ef10_filter_sync_rx_mode(efx);
+
+ return 0;
+}
+
static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
@@ -3494,6 +4097,9 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
+static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
+ u32 host_time) {}
+
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
bool temp)
{
@@ -3571,6 +4177,12 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
return 0;
}
+static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ return -EOPNOTSUPP;
+}
+
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
struct hwtstamp_config *init)
{
@@ -3607,14 +4219,118 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
}
}
+const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
+ .is_vf = true,
+ .mem_bar = EFX_MEM_VF_BAR,
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe_vf,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .map_reset_reason = efx_ef10_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_ef10_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats_vf,
+ .start_stats = efx_port_dummy_op_void,
+ .pull_stats = efx_port_dummy_op_void,
+ .stop_stats = efx_port_dummy_op_void,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol_vf,
+ .set_wol = efx_ef10_set_wol_vf,
+ .resume_wol = efx_port_dummy_op_void,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .irq_handle_legacy = efx_ef10_legacy_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_remove = efx_ef10_tx_remove,
+ .tx_write = efx_ef10_tx_write,
+ .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
+ .rx_probe = efx_ef10_rx_probe,
+ .rx_init = efx_ef10_rx_init,
+ .rx_remove = efx_ef10_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .ev_probe = efx_ef10_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_ef10_ev_fini,
+ .ev_remove = efx_ef10_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_ef10_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+ .filter_insert = efx_ef10_filter_insert,
+ .filter_remove_safe = efx_ef10_filter_remove_safe,
+ .filter_get_safe = efx_ef10_filter_get_safe,
+ .filter_clear_rx = efx_ef10_filter_clear_rx,
+ .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_port_dummy_op_int,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_ef10_vswitching_probe_vf,
+ .vswitching_restore = efx_ef10_vswitching_restore_vf,
+ .vswitching_remove = efx_ef10_vswitching_remove_vf,
+ .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_vf,
+ .set_mac_address = efx_ef10_set_mac_address,
+
+ .revision = EFX_REV_HUNT_A0,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+};
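/* Illustrative sketch, not part of the patch: efx_nic_type methods are
 * dispatched through efx->type, and hooks that a variant may leave NULL
 * (the VF type above sets no sriov_init/sriov_fini, for instance) must be
 * guarded at the call site, as efx.c does below.  A hypothetical caller:
 */
static void example_dispatch(struct efx_nic *efx)
{
	if (efx->type->sriov_fini)	/* optional hook, NULL on VFs */
		efx->type->sriov_fini(efx);
	efx->type->fini(efx);		/* mandatory hook, always set */
}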
+
const struct efx_nic_type efx_hunt_a0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = efx_ef10_mem_map_size,
- .probe = efx_ef10_probe,
+ .probe = efx_ef10_probe_pf,
.remove = efx_ef10_remove,
.dimension_resources = efx_ef10_dimension_resources,
.init = efx_ef10_init_nic,
.fini = efx_port_dummy_op_void,
- .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_reason = efx_ef10_map_reset_reason,
.map_reset_flags = efx_ef10_map_reset_flags,
.reset = efx_ef10_reset,
.probe_port = efx_mcdi_port_probe,
@@ -3623,7 +4339,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.prepare_flr = efx_ef10_prepare_flr,
.finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
- .update_stats = efx_ef10_update_stats,
+ .update_stats = efx_ef10_update_stats_pf,
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
@@ -3650,7 +4366,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
- .rx_push_rss_config = efx_ef10_rx_push_rss_config,
+ .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -3689,11 +4405,24 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_ef10_sriov_configure,
.sriov_init = efx_ef10_sriov_init,
.sriov_fini = efx_ef10_sriov_fini,
- .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
.sriov_wanted = efx_ef10_sriov_wanted,
.sriov_reset = efx_ef10_sriov_reset,
+ .sriov_flr = efx_ef10_sriov_flr,
+ .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
+ .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
+ .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
+ .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
+ .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
+ .vswitching_probe = efx_ef10_vswitching_probe_pf,
+ .vswitching_restore = efx_ef10_vswitching_restore_pf,
+ .vswitching_remove = efx_ef10_vswitching_remove_pf,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_pf,
+ .set_mac_address = efx_ef10_set_mac_address,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
new file mode 100644
index 000000000000..6c9b6e45509a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -0,0 +1,783 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "ef10_sriov.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_pcol.h"
+
+static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
+ unsigned int vf_fn)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
+ MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
+ EVB_PORT_ASSIGN_IN_PF, nic_data->pf_index,
+ EVB_PORT_ASSIGN_IN_VF, vf_fn);
+
+ return efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
+ unsigned int vswitch_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE, vswitch_type);
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 2);
+ MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS,
+ VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0);
+
+ /* Quietly try to allocate 2 VLAN tags */
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+	/* If 2 VLAN tags are too many, revert to trying with 1 VLAN tag */
+ if (rc == -EPROTO) {
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ } else if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_VSWITCH_ALLOC,
+ MC_CMD_VSWITCH_ALLOC_IN_LEN,
+ NULL, 0, rc);
+ }
+ return rc;
+}
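/* Illustrative note, not part of the patch: efx_mcdi_rpc_quiet() suppresses
 * the automatic error logging done by efx_mcdi_rpc(), which is why the
 * -EPROTO retry above switches to the plain variant (its failure then logs
 * itself) while all other errors are reported via efx_mcdi_display_error().
 * A minimal sketch of the try-quietly-then-retry-loudly idiom, with a
 * hypothetical function name:
 */
static int example_quiet_then_loud(struct efx_nic *efx,
				   const efx_dword_t *inbuf, size_t inlen)
{
	int rc;

	/* first attempt may legitimately fail; keep the log clean */
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_ALLOC, inbuf, inlen,
				NULL, 0, NULL);
	if (rc == -EPROTO)	/* fall back, logging any second failure */
		rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf, inlen,
				  NULL, 0, NULL);
	return rc;
}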
+
+static int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_alloc(struct efx_nic *efx,
+ unsigned int port_id_in,
+ unsigned int vport_type,
+ u16 vlan,
+ unsigned int *port_id_out)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(!port_id_out);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id_in);
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE, vport_type);
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS,
+ (vlan != EFX_EF10_NO_VLAN));
+ MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_FLAGS,
+ VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0);
+ if (vlan != EFX_EF10_NO_VLAN)
+ MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS,
+ VPORT_ALLOC_IN_VLAN_TAG_0, vlan);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_VPORT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ *port_id_out = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
+ return 0;
+}
+
+static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int i;
+
+ if (!nic_data->vf)
+ return;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ struct ef10_vf *vf = nic_data->vf + i;
+
+ /* If VF is assigned, do not free the vport */
+ if (vf->pci_dev &&
+ vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ continue;
+
+ if (vf->vport_assigned) {
+ efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i);
+ vf->vport_assigned = 0;
+ }
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+ eth_zero_addr(vf->mac);
+ }
+
+ if (vf->vport_id) {
+ efx_ef10_vport_free(efx, vf->vport_id);
+ vf->vport_id = 0;
+ }
+
+ vf->efx = NULL;
+ }
+}
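/* Illustrative note, not part of the patch: the loop above is the exact
 * reverse of efx_ef10_sriov_assign_vf_vport() below - EVB port unassignment
 * first, then MAC removal, then vport free - and it deliberately skips any
 * VF whose pci_dev is still assigned to a guest, because the guest driver
 * still owns resources on that vport.
 */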
+
+static void efx_ef10_sriov_free_vf_vswitching(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ efx_ef10_sriov_free_vf_vports(efx);
+ kfree(nic_data->vf);
+ nic_data->vf = NULL;
+}
+
+static int efx_ef10_sriov_assign_vf_vport(struct efx_nic *efx,
+ unsigned int vf_i)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf = nic_data->vf + vf_i;
+ int rc;
+
+ if (WARN_ON_ONCE(!nic_data->vf))
+ return -EOPNOTSUPP;
+
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ vf->vlan, &vf->vport_id);
+ if (rc)
+ return rc;
+
+ rc = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+ if (rc) {
+ eth_zero_addr(vf->mac);
+ return rc;
+ }
+
+ rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc)
+ return rc;
+
+ vf->vport_assigned = 1;
+ return 0;
+}
+
+static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int i;
+ int rc;
+
+ nic_data->vf = kcalloc(efx->vf_count, sizeof(struct ef10_vf),
+ GFP_KERNEL);
+ if (!nic_data->vf)
+ return -ENOMEM;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ random_ether_addr(nic_data->vf[i].mac);
+ nic_data->vf[i].efx = NULL;
+ nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
+
+ rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ efx_ef10_sriov_free_vf_vports(efx);
+ kfree(nic_data->vf);
+ nic_data->vf = NULL;
+ return rc;
+}
+
+static int efx_ef10_sriov_restore_vf_vswitching(struct efx_nic *efx)
+{
+ unsigned int i;
+ int rc;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ efx_ef10_sriov_free_vf_vswitching(efx);
+ return rc;
+}
+
+/* On top of the default firmware vswitch setup, create a VEB vswitch and
+ * expansion vport for use by this function.
+ */
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ int rc;
+
+ if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
+ /* vswitch not needed as we have no VFs */
+ efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ return 0;
+ }
+
+ rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+ if (rc)
+ goto fail1;
+
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ EFX_EF10_NO_VLAN, &nic_data->vport_id);
+ if (rc)
+ goto fail2;
+
+ rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+ if (rc)
+ goto fail3;
+ ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
+
+ rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc)
+ goto fail4;
+
+ return 0;
+fail4:
+ efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+ eth_zero_addr(nic_data->vport_mac);
+fail3:
+ efx_ef10_vport_free(efx, nic_data->vport_id);
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+fail2:
+ efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
+fail1:
+ return rc;
+}
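/* Illustrative sketch, not part of the patch: the fail1..fail4 ladder above
 * is the usual kernel unwind idiom - each label releases everything acquired
 * before the failing step, in reverse order, so a single goto suffices at
 * every call site.  A two-step template using the same helpers (function
 * name hypothetical):
 */
static int example_unwind(struct efx_nic *efx, unsigned int *vport_id)
{
	int rc;

	rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED,
				    MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
	if (rc)
		return rc;		/* nothing acquired yet */

	rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
				  MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
				  EFX_EF10_NO_VLAN, vport_id);
	if (rc)
		goto fail_vswitch;	/* undo step 1 only */

	return 0;

fail_vswitch:
	efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
	return rc;
}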
+
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+}
+
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (!nic_data->must_probe_vswitching)
+ return 0;
+
+ rc = efx_ef10_vswitching_probe_pf(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_ef10_sriov_restore_vf_vswitching(efx);
+ if (rc)
+ goto fail;
+
+ nic_data->must_probe_vswitching = false;
+fail:
+ return rc;
+}
+
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (!nic_data->must_probe_vswitching)
+ return 0;
+
+ rc = efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+ if (rc)
+ return rc;
+
+ nic_data->must_probe_vswitching = false;
+ return 0;
+}
+
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ efx_ef10_sriov_free_vf_vswitching(efx);
+
+ efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+
+ if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+ return; /* No vswitch was ever created */
+
+ if (!is_zero_ether_addr(nic_data->vport_mac)) {
+ efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ efx->net_dev->dev_addr);
+ eth_zero_addr(nic_data->vport_mac);
+ }
+ efx_ef10_vport_free(efx, nic_data->vport_id);
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
+ /* Only free the vswitch if no VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ efx_ef10_vswitch_free(efx, nic_data->vport_id);
+}
+
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
+{
+ efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+}
+
+static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
+{
+ int rc = 0;
+ struct pci_dev *dev = efx->pci_dev;
+
+ efx->vf_count = num_vfs;
+
+ rc = efx_ef10_sriov_alloc_vf_vswitching(efx);
+ if (rc)
+ goto fail1;
+
+ rc = pci_enable_sriov(dev, num_vfs);
+ if (rc)
+ goto fail2;
+
+ return 0;
+fail2:
+ efx_ef10_sriov_free_vf_vswitching(efx);
+fail1:
+ efx->vf_count = 0;
+ netif_err(efx, probe, efx->net_dev,
+		   "Failed to enable SR-IOV VFs\n");
+ return rc;
+}
+
+static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ unsigned int vfs_assigned = 0;
+
+ vfs_assigned = pci_vfs_assigned(dev);
+
+ if (vfs_assigned && !force) {
+ netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
+ "please detach them before disabling SR-IOV\n");
+ return -EBUSY;
+ }
+
+ if (!vfs_assigned)
+ pci_disable_sriov(dev);
+
+ efx_ef10_sriov_free_vf_vswitching(efx);
+ efx->vf_count = 0;
+ return 0;
+}
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ if (num_vfs == 0)
+ return efx_ef10_pci_sriov_disable(efx, false);
+ else
+ return efx_ef10_pci_sriov_enable(efx, num_vfs);
+}
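/* Illustrative note, not part of the patch: this is the efx_nic_type hook
 * behind the PCI core's .sriov_configure callback (wired up in efx.c below);
 * num_vfs == 0 means "disable".  Disabling is refused with -EBUSY while VFs
 * are assigned to guests unless forced, so an unconditional teardown looks
 * like the hypothetical helper below - effectively what
 * efx_ef10_sriov_fini() does:
 */
static int example_force_disable(struct efx_nic *efx)
{
	return efx_ef10_pci_sriov_disable(efx, true);	/* force past -EBUSY */
}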
+
+int efx_ef10_sriov_init(struct efx_nic *efx)
+{
+ return 0;
+}
+
+void efx_ef10_sriov_fini(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int i;
+ int rc;
+
+ if (!nic_data->vf) {
+		/* Remove any unassigned orphaned VFs */
+ if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
+ pci_disable_sriov(efx->pci_dev);
+ return;
+ }
+
+ /* Remove any VFs in the host */
+ for (i = 0; i < efx->vf_count; ++i) {
+ struct efx_nic *vf_efx = nic_data->vf[i].efx;
+
+ if (vf_efx)
+ vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
+ }
+
+ rc = efx_ef10_pci_sriov_disable(efx, true);
+ if (rc)
+ netif_dbg(efx, drv, efx->net_dev,
+			  "Disabling SR-IOV was not successful, rc=%d\n", rc);
+	else
+		netif_dbg(efx, drv, efx->net_dev, "SR-IOV disabled\n");
+}
+
+static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id,
+ u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+ return rc;
+}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ int rc;
+
+ if (!nic_data->vf)
+ return -EOPNOTSUPP;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+ vf = nic_data->vf + vf_i;
+
+ if (vf->efx) {
+ efx_device_detach_sync(vf->efx);
+ efx_net_stop(vf->efx->net_dev);
+
+ down_write(&vf->efx->filter_sem);
+ vf->efx->type->filter_table_remove(vf->efx);
+
+ rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc) {
+ up_write(&vf->efx->filter_sem);
+ return rc;
+ }
+ }
+
+ rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+ if (rc)
+ return rc;
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc = efx_ef10_vport_del_vf_mac(efx, vf->vport_id, vf->mac);
+ if (rc)
+ return rc;
+ }
+
+ if (!is_zero_ether_addr(mac)) {
+ rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
+ if (rc) {
+ eth_zero_addr(vf->mac);
+ goto fail;
+ }
+ if (vf->efx)
+ ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
+ }
+
+ ether_addr_copy(vf->mac, mac);
+
+ rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc)
+ goto fail;
+
+ if (vf->efx) {
+ /* VF cannot use the vport_id that the PF created */
+ rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc) {
+ up_write(&vf->efx->filter_sem);
+ return rc;
+ }
+ vf->efx->type->filter_table_probe(vf->efx);
+ up_write(&vf->efx->filter_sem);
+ efx_net_open(vf->efx->net_dev);
+ netif_device_attach(vf->efx->net_dev);
+ }
+
+ return 0;
+
+fail:
+	eth_zero_addr(vf->mac);
+ return rc;
+}
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
+ u8 qos)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ u16 old_vlan, new_vlan;
+ int rc = 0, rc2 = 0;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+ if (qos != 0)
+ return -EINVAL;
+
+ vf = nic_data->vf + vf_i;
+
+ new_vlan = (vlan == 0) ? EFX_EF10_NO_VLAN : vlan;
+ if (new_vlan == vf->vlan)
+ return 0;
+
+ if (vf->efx) {
+ efx_device_detach_sync(vf->efx);
+ efx_net_stop(vf->efx->net_dev);
+
+ down_write(&vf->efx->filter_sem);
+ vf->efx->type->filter_table_remove(vf->efx);
+
+ rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc)
+ goto restore_filters;
+ }
+
+ if (vf->vport_assigned) {
+ rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+ if (rc) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to change vlan on VF %d.\n", vf_i);
+ netif_warn(efx, drv, efx->net_dev,
+ "This is likely because the VF is bound to a driver in a VM.\n");
+ netif_warn(efx, drv, efx->net_dev,
+ "Please unload the driver in the VM.\n");
+ goto restore_vadaptor;
+ }
+ vf->vport_assigned = 0;
+ }
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc = efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+ if (rc)
+ goto restore_evb_port;
+ }
+
+ if (vf->vport_id) {
+ rc = efx_ef10_vport_free(efx, vf->vport_id);
+ if (rc)
+ goto restore_mac;
+ vf->vport_id = 0;
+ }
+
+ /* Do the actual vlan change */
+ old_vlan = vf->vlan;
+ vf->vlan = new_vlan;
+
+ /* Restore everything in reverse order */
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ vf->vlan, &vf->vport_id);
+ if (rc)
+ goto reset_nic;
+
+restore_mac:
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+ if (rc2) {
+ eth_zero_addr(vf->mac);
+ goto reset_nic;
+ }
+ }
+
+restore_evb_port:
+ rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc2)
+ goto reset_nic;
+ else
+ vf->vport_assigned = 1;
+
+restore_vadaptor:
+ if (vf->efx) {
+ rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc2)
+ goto reset_nic;
+ }
+
+restore_filters:
+ if (vf->efx) {
+ rc2 = vf->efx->type->filter_table_probe(vf->efx);
+ if (rc2)
+ goto reset_nic;
+
+ up_write(&vf->efx->filter_sem);
+
+ rc2 = efx_net_open(vf->efx->net_dev);
+ if (rc2)
+ goto reset_nic;
+
+ netif_device_attach(vf->efx->net_dev);
+ }
+ return rc;
+
+reset_nic:
+ if (vf->efx) {
+ up_write(&vf->efx->filter_sem);
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore VF - scheduling reset.\n");
+ efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore the VF and cannot reset the VF "
+ "- VF is not functional.\n");
+ netif_err(efx, drv, efx->net_dev,
+ "Please reload the driver attached to the VF.\n");
+ }
+
+ return rc ? rc : rc2;
+}
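/* Illustrative note, not part of the patch: the restore_* labels above are
 * unusual in that the success path falls through them too - once the vport
 * is reallocated with the new VLAN, execution continues into restore_mac,
 * restore_evb_port, restore_vadaptor and restore_filters, rebuilding state
 * in the reverse of the teardown order.  The same labels therefore serve
 * both as the normal completion path and as partial-unwind entry points
 * when an earlier teardown step fails.
 */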
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
+ bool spoofchk)
+{
+ return spoofchk ? -EOPNOTSUPP : 0;
+}
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+ int link_state)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_AUTO !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO);
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_ENABLE !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP);
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_DISABLE !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN);
+ MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+ LINK_STATE_MODE_IN_FUNCTION_PF,
+ nic_data->pf_index,
+ LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+ MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, link_state);
+ return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL); /* don't care what old mode was */
+}
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivf)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
+
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ size_t outlen;
+ int rc;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+
+ if (!nic_data->vf)
+ return -EOPNOTSUPP;
+
+ vf = nic_data->vf + vf_i;
+
+ ivf->vf = vf_i;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = 0;
+ ether_addr_copy(ivf->mac, vf->mac);
+ ivf->vlan = (vf->vlan == EFX_EF10_NO_VLAN) ? 0 : vf->vlan;
+ ivf->qos = 0;
+
+ MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+ LINK_STATE_MODE_IN_FUNCTION_PF,
+ nic_data->pf_index,
+ LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+ MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
+ MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
+ return -EIO;
+ ivf->linkstate = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
+
+ return 0;
+}
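/* Illustrative note, not part of the patch: MC_CMD_LINK_STATE_MODE doubles
 * as a getter - writing MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE as the new
 * mode leaves the VF's link state untouched while OLD_MODE in the response
 * reports the current setting.  That is how the function above fills in
 * ivf->linkstate without needing a separate query command.
 */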
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!is_valid_ether_addr(nic_data->port_id))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
new file mode 100644
index 000000000000..db4ef537c610
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -0,0 +1,69 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF10_SRIOV_H
+#define EF10_SRIOV_H
+
+#include "net_driver.h"
+
+/**
+ * struct ef10_vf - PF's store of VF data
+ * @efx: efx_nic struct for the current VF
+ * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned
+ * @vport_id: vport ID for the VF
+ * @vport_assigned: record whether the vport is currently assigned to the VF
+ * @mac: MAC address for the VF, zero when address is removed from the vport
+ * @vlan: Default VLAN for the VF or #EFX_EF10_NO_VLAN
+ */
+struct ef10_vf {
+ struct efx_nic *efx;
+ struct pci_dev *pci_dev;
+ unsigned int vport_id;
+ unsigned int vport_assigned;
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+#define EFX_EF10_NO_VLAN 0
+};
+
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx)
+{
+ return false;
+}
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_ef10_sriov_init(struct efx_nic *efx);
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+void efx_ef10_sriov_fini(struct efx_nic *efx);
+static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned int vf_i) {}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
+ u16 vlan, u8 qos);
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+ bool spoofchk);
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivf);
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+ int link_state);
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+
+#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4b00545a3ace..0c42ed9c9e4c 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -26,6 +26,7 @@
#include "efx.h"
#include "nic.h"
#include "selftest.h"
+#include "sriov.h"
#include "mcdi.h"
#include "workarounds.h"
@@ -76,6 +77,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
[RESET_TYPE_MC_BIST] = "MC_BIST",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
@@ -948,6 +950,16 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
static void efx_fini_port(struct efx_nic *efx);
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx);
+ up_read(&efx->filter_sem);
+}
+
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
* the MAC appropriately. All other PHY configuration changes are pushed
* through phy_op->set_settings(), and pushed asynchronously to the MAC
@@ -1001,7 +1013,7 @@ static void efx_mac_work(struct work_struct *data)
mutex_lock(&efx->mac_lock);
if (efx->port_enabled)
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -1041,11 +1053,11 @@ static int efx_init_port(struct efx_nic *efx)
/* Reconfigure the MAC before creating dma queues (required for
* Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
/* Ensure the PHY advertises the correct flow control settings */
rc = efx->phy_op->reconfigure(efx);
- if (rc)
+ if (rc && rc != -EPERM)
goto fail2;
mutex_unlock(&efx->mac_lock);
@@ -1067,7 +1079,7 @@ static void efx_start_port(struct efx_nic *efx)
efx->port_enabled = true;
/* Ensure MAC ingress/egress is enabled */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -1200,10 +1212,12 @@ static int efx_init_io(struct efx_nic *efx)
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
unsigned int mem_map_size = efx->type->mem_map_size(efx);
- int rc;
+ int rc, bar;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+ bar = efx->type->mem_bar;
+
rc = pci_enable_device(pci_dev);
if (rc) {
netif_err(efx, probe, efx->net_dev,
@@ -1234,8 +1248,8 @@ static int efx_init_io(struct efx_nic *efx)
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
- rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
netif_err(efx, probe, efx->net_dev,
"request for memory BAR failed\n");
@@ -1258,7 +1272,7 @@ static int efx_init_io(struct efx_nic *efx)
return 0;
fail4:
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ pci_release_region(efx->pci_dev, bar);
fail3:
efx->membase_phys = 0;
fail2:
@@ -1269,6 +1283,8 @@ static int efx_init_io(struct efx_nic *efx)
static void efx_fini_io(struct efx_nic *efx)
{
+ int bar;
+
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
@@ -1277,11 +1293,23 @@ static void efx_fini_io(struct efx_nic *efx)
}
if (efx->membase_phys) {
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ bar = efx->type->mem_bar;
+ pci_release_region(efx->pci_dev, bar);
efx->membase_phys = 0;
}
- pci_disable_device(efx->pci_dev);
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
+
+void efx_set_default_rx_indir_table(struct efx_nic *efx)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
}
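/* Illustrative note, not part of the patch: ethtool_rxfh_indir_default()
 * spreads entries round-robin (index % n_rx_rings), so with rss_spread == 4
 * the table above becomes 0, 1, 2, 3, 0, 1, 2, 3, ... across all
 * ARRAY_SIZE(efx->rx_indir_table) slots.
 */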
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
@@ -1314,15 +1342,19 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
/* If RSS is requested for the PF *and* VFs then we can't write RSS
* table entries that are inaccessible to VFs
*/
- if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
- count > efx_vf_size(efx)) {
- netif_warn(efx, probe, efx->net_dev,
- "Reducing number of RSS channels from %u to %u for "
- "VF support. Increase vf-msix-limit to use more "
- "channels on the PF.\n",
- count, efx_vf_size(efx));
- count = efx_vf_size(efx);
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
}
+#endif
return count;
}
@@ -1426,10 +1458,15 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* RSS might be usable on VFs even if it is disabled on the PF */
-
- efx->rss_spread = ((efx->n_rx_channels > 1 ||
- !efx->type->sriov_wanted(efx)) ?
- efx->n_rx_channels : efx_vf_size(efx));
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ efx->rss_spread = ((efx->n_rx_channels > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ efx->n_rx_channels : efx_vf_size(efx));
+ return 0;
+ }
+#endif
+ efx->rss_spread = efx->n_rx_channels;
return 0;
}
@@ -1593,7 +1630,6 @@ static void efx_set_channels(struct efx_nic *efx)
static int efx_probe_nic(struct efx_nic *efx)
{
- size_t i;
int rc;
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
@@ -1616,10 +1652,9 @@ static int efx_probe_nic(struct efx_nic *efx)
goto fail2;
if (efx->n_channels > 1)
- netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- efx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
+ netdev_rss_key_fill(&efx->rx_hash_key,
+ sizeof(efx->rx_hash_key));
+ efx_set_default_rx_indir_table(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
@@ -1650,10 +1685,11 @@ static int efx_probe_filters(struct efx_nic *efx)
int rc;
spin_lock_init(&efx->filter_lock);
-
+ init_rwsem(&efx->filter_sem);
+ down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
- return rc;
+ goto out_unlock;
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
@@ -1662,12 +1698,14 @@ static int efx_probe_filters(struct efx_nic *efx)
GFP_KERNEL);
if (!efx->rps_flow_id) {
efx->type->filter_table_remove(efx);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_unlock;
}
}
#endif
-
- return 0;
+out_unlock:
+ up_write(&efx->filter_sem);
+ return rc;
}
static void efx_remove_filters(struct efx_nic *efx)
@@ -1675,12 +1713,16 @@ static void efx_remove_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_flow_id);
#endif
+ down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
+ up_write(&efx->filter_sem);
}
static void efx_restore_filters(struct efx_nic *efx)
{
+ down_read(&efx->filter_sem);
efx->type->filter_table_restore(efx);
+ up_read(&efx->filter_sem);
}
/**************************************************************************
@@ -1712,21 +1754,33 @@ static int efx_probe_all(struct efx_nic *efx)
}
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_probe(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+			   "failed to set up vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
rc = efx_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
- goto fail3;
+ goto fail4;
}
rc = efx_probe_channels(efx);
if (rc)
- goto fail4;
+ goto fail5;
return 0;
- fail4:
+ fail5:
efx_remove_filters(efx);
+ fail4:
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
fail3:
efx_remove_port(efx);
fail2:
@@ -1816,6 +1870,9 @@ static void efx_remove_all(struct efx_nic *efx)
{
efx_remove_channels(efx);
efx_remove_filters(efx);
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
efx_remove_port(efx);
efx_remove_nic(efx);
}
@@ -2059,7 +2116,7 @@ static int efx_busy_poll(struct napi_struct *napi)
*************************************************************************/
/* Context: process, rtnl_lock() held. */
-static int efx_net_open(struct net_device *net_dev)
+int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
@@ -2088,7 +2145,7 @@ static int efx_net_open(struct net_device *net_dev)
* Note that the kernel will ignore our return code; this method
* should really be a void.
*/
-static int efx_net_stop(struct net_device *net_dev)
+int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2146,7 +2203,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
@@ -2159,6 +2216,8 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
u8 *new_addr = addr->sa_data;
+	u8 old_addr[ETH_ALEN];
+ int rc;
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
@@ -2167,12 +2226,20 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
return -EADDRNOTAVAIL;
}
+ /* save old address */
+ ether_addr_copy(old_addr, net_dev->dev_addr);
ether_addr_copy(net_dev->dev_addr, new_addr);
- efx->type->sriov_mac_address_changed(efx);
+ if (efx->type->set_mac_address) {
+ rc = efx->type->set_mac_address(efx);
+ if (rc) {
+ ether_addr_copy(net_dev->dev_addr, old_addr);
+ return rc;
+ }
+ }
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
return 0;
@@ -2199,7 +2266,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
-static const struct net_device_ops efx_farch_netdev_ops = {
+static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
@@ -2212,10 +2279,12 @@ static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_SFC_SRIOV
- .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac,
- .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
- .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
- .ndo_get_vf_config = efx_siena_sriov_get_vf_config,
+ .ndo_set_vf_mac = efx_sriov_set_vf_mac,
+ .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
+ .ndo_get_vf_config = efx_sriov_get_vf_config,
+ .ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
+ .ndo_get_phys_port_id = efx_sriov_get_phys_port_id,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
@@ -2229,29 +2298,6 @@ static const struct net_device_ops efx_farch_netdev_ops = {
#endif
};
-static const struct net_device_ops efx_ef10_netdev_ops = {
- .ndo_open = efx_net_open,
- .ndo_stop = efx_net_stop,
- .ndo_get_stats64 = efx_net_stats,
- .ndo_tx_timeout = efx_watchdog,
- .ndo_start_xmit = efx_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = efx_ioctl,
- .ndo_change_mtu = efx_change_mtu,
- .ndo_set_mac_address = efx_set_mac_address,
- .ndo_set_rx_mode = efx_set_rx_mode,
- .ndo_set_features = efx_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = efx_netpoll,
-#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = efx_busy_poll,
-#endif
-#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = efx_filter_rfs,
-#endif
-};
-
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
@@ -2264,8 +2310,7 @@ static int efx_netdev_event(struct notifier_block *this,
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
- net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
+ if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
@@ -2284,6 +2329,28 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool enable = count > 0 && *buf != '0';
+
+ mcdi->logging_enabled = enable;
+ return count;
+}
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+#endif
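/* Usage note, illustrative: with CONFIG_SFC_MCDI_LOGGING enabled this
 * attribute appears under the PCI device, so on a hypothetical BDF:
 *   cat /sys/bus/pci/devices/0000:01:00.0/mcdi_logging
 *   echo 1 > /sys/bus/pci/devices/0000:01:00.0/mcdi_logging
 * Any first character other than '0' enables logging, per set_mcdi_log()
 * above.
 */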
+
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
@@ -2292,12 +2359,9 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- net_dev->netdev_ops = &efx_ef10_netdev_ops;
+ net_dev->netdev_ops = &efx_netdev_ops;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
- } else {
- net_dev->netdev_ops = &efx_farch_netdev_ops;
- }
net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
@@ -2344,9 +2408,21 @@ static int efx_register_netdev(struct efx_nic *efx)
"failed to init net dev attributes\n");
goto fail_registered;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_attr_mcdi_logging;
+ }
+#endif
return 0;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+fail_attr_mcdi_logging:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+#endif
fail_registered:
rtnl_lock();
efx_dissociate(efx);
@@ -2365,13 +2441,14 @@ static void efx_unregister_netdev(struct efx_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-
- rtnl_lock();
- unregister_netdevice(efx->net_dev);
- efx->state = STATE_UNINIT;
- rtnl_unlock();
+ if (efx_dev_registered(efx)) {
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+#endif
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
+ }
}
/**************************************************************************
@@ -2393,7 +2470,8 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH)
efx->phy_op->fini(efx);
efx->type->fini(efx);
}
@@ -2422,11 +2500,13 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (!ok)
goto fail;
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
rc = efx->phy_op->init(efx);
if (rc)
goto fail;
- if (efx->phy_op->reconfigure(efx))
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
}
@@ -2434,8 +2514,20 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_restore(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ down_read(&efx->filter_sem);
efx_restore_filters(efx);
- efx->type->sriov_reset(efx);
+ up_read(&efx->filter_sem);
+ if (efx->type->sriov_reset)
+ efx->type->sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
@@ -2605,6 +2697,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
case RESET_TYPE_MC_BIST:
case RESET_TYPE_MCDI_TIMEOUT:
method = type;
@@ -2655,6 +2748,8 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
+ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{0} /* end of list */
@@ -2809,7 +2904,8 @@ static void efx_pci_remove_main(struct efx_nic *efx)
}
/* Final NIC shutdown
- * This is called only at module unload (or hotplug removal).
+ * This is called only at module unload (or hotplug removal). A PF can call
+ * this on its VFs to ensure they are unbound first.
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
@@ -2826,7 +2922,9 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_disable_interrupts(efx);
rtnl_unlock();
- efx->type->sriov_fini(efx);
+ if (efx->type->sriov_fini)
+ efx->type->sriov_fini(efx);
+
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
@@ -3008,7 +3106,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_probe_vpd_strings(efx);
+ if (!efx->type->is_vf)
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -3023,10 +3122,12 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail4;
- rc = efx->type->sriov_init(efx);
- if (rc)
- netif_err(efx, probe, efx->net_dev,
- "SR-IOV can't be enabled rc %d\n", rc);
+ if (efx->type->sriov_init) {
+ rc = efx->type->sriov_init(efx);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "SR-IOV can't be enabled rc %d\n", rc);
+ }
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
@@ -3058,6 +3159,26 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return rc;
}
+/* efx_pci_sriov_configure returns the actual number of Virtual Functions
+ * enabled on success
+ */
+#ifdef CONFIG_SFC_SRIOV
+static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ int rc;
+ struct efx_nic *efx = pci_get_drvdata(dev);
+
+ if (efx->type->sriov_configure) {
+ rc = efx->type->sriov_configure(efx, num_vfs);
+ if (rc)
+ return rc;
+ else
+ return num_vfs;
+ } else
+ return -EOPNOTSUPP;
+}
+#endif
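/* Usage note, illustrative: the PCI core calls this through the driver's
 * .sriov_configure hook (registered further down), typically triggered by
 *   echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 * The callback contract is to return the number of VFs actually enabled,
 * hence num_vfs rather than 0 on success.
 */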
+
static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
@@ -3280,6 +3401,9 @@ static struct pci_driver efx_pci_driver = {
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
.err_handler = &efx_err_handlers,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_pci_sriov_configure,
+#endif
};
/**************************************************************************
@@ -3302,9 +3426,11 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
+#ifdef CONFIG_SFC_SRIOV
rc = efx_init_sriov();
if (rc)
goto err_sriov;
+#endif
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
@@ -3321,8 +3447,10 @@ static int __init efx_init_module(void)
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
+#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
err_sriov:
+#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -3334,7 +3462,9 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
+#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
+#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 2587c582a821..acb1e0718485 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,7 +15,12 @@
#include "filter.h"
/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All VFs use BAR 0/1 for memory */
#define EFX_MEM_BAR 2
+#define EFX_MEM_VF_BAR 0
+
+int efx_net_open(struct net_device *net_dev);
+int efx_net_stop(struct net_device *net_dev);
/* TX */
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
@@ -32,6 +37,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
/* RX */
+void efx_set_default_rx_indir_table(struct efx_nic *efx);
void efx_rx_config_page_split(struct efx_nic *efx);
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
@@ -71,6 +77,8 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
/* Filters */
+void efx_mac_reconfigure(struct efx_nic *efx);
+
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -220,6 +228,13 @@ static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif
+#ifdef CONFIG_SFC_SRIOV
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+ return 1 << efx->vi_scale;
+}
+#endif
+
static inline void efx_schedule_channel(struct efx_channel *channel)
{
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d1dbb5fb31bb..c94f56271dd4 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
* @RESET_TYPE_MC_BIST: MC entering BIST mode.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
@@ -159,6 +160,7 @@ enum reset_type {
RESET_TYPE_ALL,
RESET_TYPE_WORLD,
RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_DATAPATH,
RESET_TYPE_MC_BIST,
RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 4835bc0d0de8..034797661f96 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -734,7 +734,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
/* Reconfigure the MAC. The PHY *may* generate a link state change event
* if the user just changed the advertised capabilities, but there's no
* harm doing this twice */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
out:
mutex_unlock(&efx->mac_lock);
@@ -1109,9 +1109,8 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
return -EOPNOTSUPP;
if (!indir)
return 0;
- memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
- efx->type->rx_push_rss_config(efx);
- return 0;
+
+ return efx->type->rx_push_rss_config(efx, true, indir);
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index f166c8ef38a3..80e69af21642 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -477,16 +477,29 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
*
**************************************************************************
*/
+static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
+{
+ (void) efx;
+ (void) user;
+ (void) rx_indir_table;
+ return -ENOSYS;
+}
-static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
{
efx_oword_t temp;
+ (void) user;
/* Set hash key for IPv4 */
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
+ return 0;
}
/**************************************************************************
@@ -2507,7 +2520,7 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- falcon_b0_rx_push_rss_config(efx);
+ falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2687,6 +2700,8 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
@@ -2729,7 +2744,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_rss_config = efx_port_dummy_op_void,
+ .rx_push_rss_config = dummy_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -2766,11 +2781,6 @@ const struct efx_nic_type falcon_a1_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
- .sriov_init = efx_falcon_sriov_init,
- .sriov_fini = efx_falcon_sriov_fini,
- .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
- .sriov_wanted = efx_falcon_sriov_wanted,
- .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_A1,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
@@ -2788,6 +2798,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
};
const struct efx_nic_type falcon_b0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
@@ -2867,11 +2879,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
- .sriov_init = efx_falcon_sriov_init,
- .sriov_fini = efx_falcon_sriov_fini,
- .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
- .sriov_wanted = efx_falcon_sriov_wanted,
- .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_B0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index bb89e96a125e..f08266f0eca2 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -20,6 +20,8 @@
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
+#include "sriov.h"
+#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"
@@ -1198,13 +1200,17 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_tx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_tx_flush_done(efx, event);
+#endif
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_rx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_rx_flush_done(efx, event);
+#endif
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
@@ -1242,8 +1248,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
- } else
+ }
+#ifdef CONFIG_SFC_SRIOV
+ else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
@@ -1252,8 +1261,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
- } else
+ }
+#ifdef CONFIG_SFC_SRIOV
+ else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
@@ -1317,9 +1329,11 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_farch_handle_driver_event(channel, &event);
break;
+#ifdef CONFIG_SFC_SRIOV
case FSE_CZ_EV_CODE_USER_EV:
efx_siena_sriov_event(channel, &event);
break;
+#endif
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
@@ -1685,28 +1699,32 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted(efx)) {
- unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
-
- nic_data->vf_buftbl_base = buftbl_min;
-
- vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
- vi_count = max(vi_count, EFX_VI_BASE);
- buftbl_free = (sram_lim_qw - buftbl_min -
- vi_count * vi_dc_entries);
-
- entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
- efx_vf_size(efx));
- vf_limit = min(buftbl_free / entries_per_vf,
- (1024U - EFX_VI_BASE) >> efx->vi_scale);
-
- if (efx->vf_count > vf_limit) {
- netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
- efx->vf_count, vf_limit);
- efx->vf_count = vf_limit;
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free;
+ unsigned entries_per_vf, vf_limit;
+
+ nic_data->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries +
+ EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+ "Reducing VF count from from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
}
- vi_count += efx->vf_count * efx_vf_size(efx);
}
#endif
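For intuition, the VF sizing arithmetic above with illustrative numbers (all values invented for the example, with EFX_VI_BASE taken as 128): if sram_lim_qw = 16384, buftbl_min = 2048, vi_count = 128 and vi_dc_entries = 80, then buftbl_free = 16384 - 2048 - 128 * 80 = 4096 buffer-table entries. If entries_per_vf works out to 512 and vi_scale is 6 (64 VIs per VF), vf_limit = min(4096 / 512, (1024 - 128) >> 6) = min(8, 14) = 8, so a request for 16 VFs would be trimmed to 8 and vi_count would grow by 8 * 64 = 512.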
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d37928f01949..81640f8bb811 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -8,6 +8,7 @@
*/
#include <linux/delay.h>
+#include <linux/moduleparam.h>
#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
@@ -54,18 +55,32 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
static bool efx_mcdi_poll_once(struct efx_nic *efx);
static void efx_mcdi_abandon(struct efx_nic *efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static bool mcdi_logging_default;
+module_param(mcdi_logging_default, bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+ "Enable MCDI logging on newly-probed functions");
+#endif
+
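Because the parameter is registered with mode 0644, it should also be adjustable at runtime via /sys/module/sfc/parameters/mcdi_logging_default, though as written it only takes effect for functions probed after the change: the value is copied into mcdi->logging_enabled at init time below.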
int efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
bool already_attached;
- int rc;
+ int rc = -ENOMEM;
efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
if (!efx->mcdi)
- return -ENOMEM;
+ goto fail;
mcdi = efx_mcdi(efx);
mcdi->efx = efx;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ /* consuming code assumes buffer is page-sized */
+ mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!mcdi->logging_buffer)
+ goto fail1;
+ mcdi->logging_enabled = mcdi_logging_default;
+#endif
init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock);
mcdi->state = MCDI_STATE_QUIESCENT;
@@ -81,7 +96,7 @@ int efx_mcdi_init(struct efx_nic *efx)
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
- return rc;
+ goto fail2;
/* Let the MC (and BMC, if this is a LOM) know that the driver
* is loaded. We should do this before we reset the NIC.
@@ -90,7 +105,7 @@ int efx_mcdi_init(struct efx_nic *efx)
if (rc) {
netif_err(efx, probe, efx->net_dev,
"Unable to register driver with MCPU\n");
- return rc;
+ goto fail2;
}
if (already_attached)
/* Not a fatal error */
@@ -102,6 +117,15 @@ int efx_mcdi_init(struct efx_nic *efx)
efx->primary = efx;
return 0;
+fail2:
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)mcdi->logging_buffer);
+fail1:
+#endif
+ kfree(efx->mcdi);
+ efx->mcdi = NULL;
+fail:
+ return rc;
}
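The unwind ladder above places the fail1: label inside the #ifdef so that a single goto chain serves both configurations. A stripped-down sketch of the pattern, with hypothetical resource names:

	int example_init(struct example *ex)
	{
		int rc = -ENOMEM;

		ex->a = alloc_a();
		if (!ex->a)
			goto fail;
	#ifdef CONFIG_FEATURE_X
		ex->b = alloc_b();
		if (!ex->b)
			goto fail1;	/* skip free_b() in the unwind */
	#endif
		rc = setup(ex);
		if (rc)
			goto fail2;
		return 0;

	fail2:
	#ifdef CONFIG_FEATURE_X
		free_b(ex->b);
	fail1:
	#endif
		free_a(ex->a);
	fail:
		return rc;
	}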
void efx_mcdi_fini(struct efx_nic *efx)
@@ -114,6 +138,10 @@ void efx_mcdi_fini(struct efx_nic *efx)
/* Relinquish the device (back to the BMC, if this is a LOM) */
efx_mcdi_drv_attach(efx, false, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)efx->mcdi->iface.logging_buffer);
+#endif
+
kfree(efx->mcdi);
}
@@ -121,6 +149,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr[2];
size_t hdr_len;
u32 xflags, seqno;
@@ -165,6 +196,31 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
hdr_len = 8;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ int bytes = 0;
+ int i;
+ /* Lengths should always be a whole number of dwords, so scream
+ * if they're not.
+ */
+ WARN_ON_ONCE(hdr_len % 4);
+ WARN_ON_ONCE(inlen % 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr[i].u32[0]));
+
+ for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(inbuf[i].u32[0]));
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
+ }
+#endif
+
efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
mcdi->new_epoch = false;
@@ -206,6 +262,9 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int respseq, respcmd, error;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr;
efx->type->mcdi_read_response(efx, &hdr, 0, 4);
@@ -223,6 +282,39 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ size_t hdr_len, data_len;
+ int bytes = 0;
+ int i;
+
+ WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
+ hdr_len = mcdi->resp_hdr_len / 4;
+ /* MCDI_DECLARE_BUF ensures that underlying buffer is padded
+ * to dword size, and the MCDI buffer is always dword size
+ */
+ data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr,
+ mcdi->resp_hdr_len + (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+ }
+#endif
+
if (error && mcdi->resp_data_len == 0) {
netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
mcdi->resprc = -EIO;
@@ -406,7 +498,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
struct efx_mcdi_async_param *async;
size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
- MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ MCDI_DECLARE_BUF_ERR(errbuf);
int rc;
if (cmpxchg(&mcdi->state,
@@ -534,7 +626,7 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
size_t *outlen_actual, bool quiet)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ MCDI_DECLARE_BUF_ERR(errbuf);
int rc;
if (mcdi->mode == MCDI_MODE_POLL)
@@ -1035,7 +1127,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
/* MAC stats are gathered lazily. We can ignore this. */
break;
case MCDI_EVENT_CODE_FLR:
- efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+ if (efx->type->sriov_flr)
+ efx->type->sriov_flr(efx,
+ MCDI_EVENT_FIELD(*event, FLR_VF));
break;
case MCDI_EVENT_CODE_PTP_RX:
case MCDI_EVENT_CODE_PTP_FAULT:
@@ -1081,9 +1175,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
- MCDI_DECLARE_BUF(outbuf,
- max(MC_CMD_GET_VERSION_OUT_LEN,
- MC_CMD_GET_CAPABILITIES_OUT_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
size_t outlength;
const __le16 *ver_words;
size_t offset;
@@ -1108,19 +1200,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
* single version. Report which variants are running.
*/
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
- outbuf, sizeof(outbuf), &outlength);
- if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
- offset += snprintf(
- buf + offset, len - offset, " rx? tx?");
- else
- offset += snprintf(
- buf + offset, len - offset, " rx%x tx%x",
- MCDI_WORD(outbuf,
- GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
- MCDI_WORD(outbuf,
- GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
+ nic_data->rx_dpcpu_fw_id,
+ nic_data->tx_dpcpu_fw_id);
/* It's theoretically possible for the string to exceed 31
* characters, though in practice the first three version
@@ -1150,10 +1234,26 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
- rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
+ * specified will fail with EPERM, and we have to tell the MC we don't
+ * care what firmware we get.
+ */
+ if (rc == -EPERM) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
+ MC_CMD_FW_DONT_CARE);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ }
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
+ outbuf, outlen, rc);
goto fail;
+ }
if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
rc = -EIO;
goto fail;
@@ -1178,16 +1278,6 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
- if (driver_operating &&
- (efx->mcdi->fn_flags &
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
- 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
- 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
- netif_err(efx, probe, efx->net_dev,
- "This driver version only supports one function per port\n");
- return -ENODEV;
- }
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -1385,10 +1475,13 @@ fail1:
return rc;
}
+/* Returns 1 if an assertion was read, 0 if no assertion had fired,
+ * negative on error.
+ */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
unsigned int flags, index;
const char *reason;
size_t outlen;
@@ -1406,6 +1499,8 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
outbuf, sizeof(outbuf), &outlen);
+ if (rc == -EPERM)
+ return 0;
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
if (rc) {
@@ -1443,24 +1538,31 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
index));
- return 0;
+ return 1;
}
-static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+static int efx_mcdi_exit_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+ int rc;
/* If the MC is running debug firmware, it might now be
* waiting for a debugger to attach, but we just want it to
* reboot. We set a flag that makes the command a no-op if it
- * has already done so. We don't know what return code to
- * expect (0 or -EIO), so ignore it.
+ * has already done so.
+ * The MCDI will thus return either 0 or -EIO.
*/
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+ if (rc == -EIO)
+ rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, rc);
+ return rc;
}
int efx_mcdi_handle_assertion(struct efx_nic *efx)
@@ -1468,12 +1570,10 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
int rc;
rc = efx_mcdi_read_assertion(efx);
- if (rc)
+ if (rc <= 0)
return rc;
- efx_mcdi_exit_assertion(efx);
-
- return 0;
+ return efx_mcdi_exit_assertion(efx);
}
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
@@ -1550,7 +1650,9 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
if (rc)
return rc;
- if (method == RESET_TYPE_WORLD)
+ if (method == RESET_TYPE_DATAPATH)
+ return 0;
+ else if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
else
return efx_mcdi_reset_func(efx);
@@ -1688,6 +1790,36 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
NULL, 0, NULL);
}
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+ unsigned int *enabled_out)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (impl_out)
+ *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+
+ if (enabled_out)
+ *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
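A minimal sketch of consuming the new helper, for a caller that wants to know whether the bug 35388 workaround is both implemented and enabled (the BUG35388 mask is added to mcdi_pcol.h later in this patch; the function below is illustrative, not part of the patch):

static bool example_bug35388_active(struct efx_nic *efx)
{
	unsigned int implemented, enabled;

	if (efx_mcdi_get_workarounds(efx, &implemented, &enabled))
		return false;	/* treat a failed query as "not active" */

	return (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) &&
	       (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388);
}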
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 56465f7465a2..1838afe2da92 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -58,6 +58,8 @@ enum efx_mcdi_mode {
* enabled
* @async_list: Queue of asynchronous requests
* @async_timer: Timer for asynchronous request timeout
+ * @logging_buffer: buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: whether to trace MCDI
*/
struct efx_mcdi_iface {
struct efx_nic *efx;
@@ -74,6 +76,10 @@ struct efx_mcdi_iface {
spinlock_t async_lock;
struct list_head async_list;
struct timer_list async_timer;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *logging_buffer;
+ bool logging_enabled;
+#endif
};
struct efx_mcdi_mon {
@@ -176,10 +182,12 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
* 32-bit-aligned. Also, on Siena we must copy to the MC shared
* memory strictly 32 bits at a time, so add any necessary padding.
*/
-#define MCDI_DECLARE_BUF(_name, _len) \
+#define _MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
-#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
- MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
+#define MCDI_DECLARE_BUF(_name, _len) \
+ _MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
+#define MCDI_DECLARE_BUF_ERR(_name) \
+ MCDI_DECLARE_BUF(_name, 8)
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
@@ -339,6 +347,8 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+ unsigned int *enabled_out);
#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);
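To make the MCDI_DECLARE_BUF change above concrete: request and response buffers are now zero-initialised at declaration, so, for example, MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN) expands to roughly

	efx_dword_t inbuf[DIV_ROUND_UP(MC_CMD_REBOOT_IN_LEN, 4)] = {{{0}}};

which keeps pad bytes (and any fields the caller never sets) from carrying stack garbage onto the wire, while MCDI_DECLARE_BUF_ERR(errbuf) provides the fixed 8-byte buffer needed to capture an error response.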
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e028de10e1b7..45fca9fc66b7 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -638,6 +638,8 @@
*/
#define MC_CMD_READ32 0x1
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ32_IN msgrequest */
#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
@@ -659,6 +661,8 @@
*/
#define MC_CMD_WRITE32 0x2
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_WRITE32_IN msgrequest */
#define MC_CMD_WRITE32_IN_LENMIN 8
#define MC_CMD_WRITE32_IN_LENMAX 252
@@ -679,6 +683,8 @@
*/
#define MC_CMD_COPYCODE 0x3
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
/* Source address */
@@ -717,6 +723,8 @@
*/
#define MC_CMD_SET_FUNC 0x4
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
/* Set function */
@@ -732,6 +740,8 @@
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -758,6 +768,8 @@
*/
#define MC_CMD_GET_ASSERTS 0x6
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_ASSERTS_IN msgrequest */
#define MC_CMD_GET_ASSERTS_IN_LEN 4
/* Set to clear assertion */
@@ -794,6 +806,8 @@
*/
#define MC_CMD_LOG_CTRL 0x7
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LOG_CTRL_IN msgrequest */
#define MC_CMD_LOG_CTRL_IN_LEN 8
/* Log destination */
@@ -814,6 +828,8 @@
*/
#define MC_CMD_GET_VERSION 0x8
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VERSION_IN msgrequest */
#define MC_CMD_GET_VERSION_IN_LEN 0
@@ -870,6 +886,8 @@
*/
#define MC_CMD_PTP 0xb
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_PTP_IN msgrequest */
#define MC_CMD_PTP_IN_LEN 1
/* PTP operation code */
@@ -1404,6 +1422,8 @@
*/
#define MC_CMD_CSR_READ32 0xc
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
/* Address */
@@ -1428,6 +1448,8 @@
*/
#define MC_CMD_CSR_WRITE32 0xd
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CSR_WRITE32_IN msgrequest */
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
@@ -1452,6 +1474,8 @@
*/
#define MC_CMD_HP 0x54
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_HP_IN msgrequest */
#define MC_CMD_HP_IN_LEN 16
/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
@@ -1493,6 +1517,8 @@
*/
#define MC_CMD_STACKINFO 0xf
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_STACKINFO_IN msgrequest */
#define MC_CMD_STACKINFO_IN_LEN 0
@@ -1513,6 +1539,8 @@
*/
#define MC_CMD_MDIO_READ 0x10
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MDIO_READ_IN msgrequest */
#define MC_CMD_MDIO_READ_IN_LEN 16
/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1552,6 +1580,8 @@
*/
#define MC_CMD_MDIO_WRITE 0x11
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_MDIO_WRITE_IN msgrequest */
#define MC_CMD_MDIO_WRITE_IN_LEN 20
/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1591,6 +1621,8 @@
*/
#define MC_CMD_DBI_WRITE 0x12
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DBI_WRITE_IN msgrequest */
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
#define MC_CMD_DBI_WRITE_IN_LENMAX 252
@@ -1739,6 +1771,8 @@
*/
#define MC_CMD_GET_BOARD_CFG 0x18
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
@@ -1778,6 +1812,8 @@
*/
#define MC_CMD_DBI_READX 0x19
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DBI_READX_IN msgrequest */
#define MC_CMD_DBI_READX_IN_LENMIN 8
#define MC_CMD_DBI_READX_IN_LENMAX 248
@@ -1822,6 +1858,8 @@
*/
#define MC_CMD_SET_RAND_SEED 0x1a
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
/* Seed value. */
@@ -1863,6 +1901,8 @@
*/
#define MC_CMD_DRV_ATTACH 0x1c
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DRV_ATTACH_IN msgrequest */
#define MC_CMD_DRV_ATTACH_IN_LEN 12
/* new state (0=detached, 1=attached) to set if UPDATE=1 */
@@ -1875,6 +1915,8 @@
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
@@ -1920,6 +1962,8 @@
*/
#define MC_CMD_PORT_RESET 0x20
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_PORT_RESET_IN msgrequest */
#define MC_CMD_PORT_RESET_IN_LEN 0
@@ -1934,6 +1978,7 @@
* extended version of the deprecated MC_CMD_PORT_RESET with added fields.
*/
#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
/* MC_CMD_ENTITY_RESET_IN msgrequest */
#define MC_CMD_ENTITY_RESET_IN_LEN 4
@@ -2023,6 +2068,8 @@
*/
#define MC_CMD_PUTS 0x23
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
#define MC_CMD_PUTS_IN_LENMAX 252
@@ -2050,6 +2097,8 @@
*/
#define MC_CMD_GET_PHY_CFG 0x24
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
@@ -2149,6 +2198,8 @@
*/
#define MC_CMD_START_BIST 0x25
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
/* Type of test. */
@@ -2185,6 +2236,8 @@
*/
#define MC_CMD_POLL_BIST 0x26
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -2344,6 +2397,8 @@
*/
#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
@@ -2463,6 +2518,8 @@
*/
#define MC_CMD_GET_LINK 0x29
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
@@ -2519,6 +2576,8 @@
*/
#define MC_CMD_SET_LINK 0x2a
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
/* ??? */
@@ -2550,6 +2609,8 @@
*/
#define MC_CMD_SET_ID_LED 0x2b
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_ID_LED_IN msgrequest */
#define MC_CMD_SET_ID_LED_IN_LEN 4
/* Set LED state. */
@@ -2568,6 +2629,8 @@
*/
#define MC_CMD_SET_MAC 0x2c
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 24
/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
@@ -2609,6 +2672,8 @@
*/
#define MC_CMD_PHY_STATS 0x2d
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_PHY_STATS_IN msgrequest */
#define MC_CMD_PHY_STATS_IN_LEN 8
/* ??? */
@@ -2687,8 +2752,10 @@
*/
#define MC_CMD_MAC_STATS 0x2e
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MAC_STATS_IN msgrequest */
-#define MC_CMD_MAC_STATS_IN_LEN 16
+#define MC_CMD_MAC_STATS_IN_LEN 20
/* ??? */
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
@@ -2710,6 +2777,8 @@
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -2824,11 +2893,31 @@
/* enum: RXDP counter: Number of times an emergency descriptor fetch was
* performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
* descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
/* enum: Start of GMAC stats buffer space, for Siena only. */
#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
@@ -2926,6 +3015,8 @@
*/
#define MC_CMD_WOL_FILTER_SET 0x32
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
@@ -3020,6 +3111,8 @@
*/
#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
@@ -3035,6 +3128,8 @@
*/
#define MC_CMD_WOL_FILTER_RESET 0x34
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
@@ -3069,6 +3164,8 @@
*/
#define MC_CMD_NVRAM_TYPES 0x36
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_TYPES_IN msgrequest */
#define MC_CMD_NVRAM_TYPES_IN_LEN 0
@@ -3125,6 +3222,8 @@
*/
#define MC_CMD_NVRAM_INFO 0x37
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_INFO_IN msgrequest */
#define MC_CMD_NVRAM_INFO_IN_LEN 4
#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
@@ -3157,6 +3256,8 @@
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
@@ -3175,6 +3276,8 @@
*/
#define MC_CMD_NVRAM_READ 0x39
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_READ_IN msgrequest */
#define MC_CMD_NVRAM_READ_IN_LEN 12
#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
@@ -3202,6 +3305,8 @@
*/
#define MC_CMD_NVRAM_WRITE 0x3a
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_WRITE_IN msgrequest */
#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
@@ -3228,6 +3333,8 @@
*/
#define MC_CMD_NVRAM_ERASE 0x3b
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_ERASE_IN msgrequest */
#define MC_CMD_NVRAM_ERASE_IN_LEN 12
#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
@@ -3248,6 +3355,8 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
@@ -3279,6 +3388,8 @@
*/
#define MC_CMD_REBOOT 0x3d
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
@@ -3316,6 +3427,8 @@
*/
#define MC_CMD_REBOOT_MODE 0x3f
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
@@ -3368,6 +3481,8 @@
*/
#define MC_CMD_SENSOR_INFO 0x41
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -3542,6 +3657,8 @@
*/
#define MC_CMD_READ_SENSORS 0x42
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
@@ -3602,6 +3719,8 @@
*/
#define MC_CMD_GET_PHY_STATE 0x43
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
@@ -3636,6 +3755,8 @@
*/
#define MC_CMD_WOL_FILTER_GET 0x45
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
@@ -3651,6 +3772,8 @@
*/
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
@@ -3692,6 +3815,8 @@
*/
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
@@ -3722,6 +3847,8 @@
*/
#define MC_CMD_TESTASSERT 0x49
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_TESTASSERT_IN msgrequest */
#define MC_CMD_TESTASSERT_IN_LEN 0
@@ -3739,6 +3866,8 @@
*/
#define MC_CMD_WORKAROUND 0x4a
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
@@ -3765,6 +3894,8 @@
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
@@ -3788,6 +3919,8 @@
*/
#define MC_CMD_NVRAM_TEST 0x4c
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
@@ -3849,6 +3982,8 @@
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
@@ -3890,6 +4025,8 @@
*/
#define MC_CMD_NVRAM_PARTITIONS 0x51
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
@@ -3913,6 +4050,8 @@
*/
#define MC_CMD_NVRAM_METADATA 0x52
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_METADATA_IN msgrequest */
#define MC_CMD_NVRAM_METADATA_IN_LEN 4
/* Partition type ID code */
@@ -3958,6 +4097,8 @@
*/
#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
@@ -4087,11 +4228,66 @@
/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; must be a VF,
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
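A hedged sketch of packing the FUNCTION field as described: with the PF number in bits 0-15 and the VF number in bits 16-31, PF 1 / VF 3 packs to 0x00030001. MCDI_POPULATE_DWORD_2 is assumed here to be the existing bitfield helper from mcdi.h; the fragment is illustrative only:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);

	MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
			      LINK_STATE_MODE_IN_FUNCTION_PF, 1,
			      LINK_STATE_MODE_IN_FUNCTION_VF, 3);
	/* Read the current mode without modifying it */
	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
		       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);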
+
+/***********************************/
/* MC_CMD_READ_REGS
* Get a dump of the MCPU registers
*/
#define MC_CMD_READ_REGS 0x50
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_REGS_IN msgrequest */
#define MC_CMD_READ_REGS_IN_LEN 0
@@ -4115,6 +4311,8 @@
*/
#define MC_CMD_INIT_EVQ 0x80
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_EVQ_IN msgrequest */
#define MC_CMD_INIT_EVQ_IN_LENMIN 44
#define MC_CMD_INIT_EVQ_IN_LENMAX 548
@@ -4213,6 +4411,8 @@
*/
#define MC_CMD_INIT_RXQ 0x81
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_RXQ_IN msgrequest */
#define MC_CMD_INIT_RXQ_IN_LENMIN 36
#define MC_CMD_INIT_RXQ_IN_LENMAX 252
@@ -4265,6 +4465,8 @@
*/
#define MC_CMD_INIT_TXQ 0x82
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_TXQ_IN msgrequest */
#define MC_CMD_INIT_TXQ_IN_LENMIN 36
#define MC_CMD_INIT_TXQ_IN_LENMAX 252
@@ -4322,6 +4524,8 @@
*/
#define MC_CMD_FINI_EVQ 0x83
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_EVQ_IN msgrequest */
#define MC_CMD_FINI_EVQ_IN_LEN 4
/* Instance of EVQ to destroy. Should be the same instance as that previously
@@ -4339,6 +4543,8 @@
*/
#define MC_CMD_FINI_RXQ 0x84
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_RXQ_IN msgrequest */
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
@@ -4354,6 +4560,8 @@
*/
#define MC_CMD_FINI_TXQ 0x85
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_TXQ_IN msgrequest */
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
@@ -4369,6 +4577,8 @@
*/
#define MC_CMD_DRIVER_EVENT 0x86
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DRIVER_EVENT_IN msgrequest */
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
@@ -4392,6 +4602,8 @@
*/
#define MC_CMD_PROXY_CMD 0x5b
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PROXY_CMD_IN msgrequest */
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
@@ -4414,6 +4626,8 @@
*/
#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
@@ -4437,6 +4651,8 @@
*/
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
@@ -4463,6 +4679,8 @@
*/
#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
@@ -4477,6 +4695,8 @@
*/
#define MC_CMD_FILTER_OP 0x8a
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FILTER_OP_IN msgrequest */
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
@@ -4637,6 +4857,8 @@
*/
#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
@@ -4669,6 +4891,8 @@
*/
#define MC_CMD_PARSER_DISP_RW 0xe5
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
@@ -4719,6 +4943,8 @@
*/
#define MC_CMD_GET_PF_COUNT 0xb6
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PF_COUNT_IN msgrequest */
#define MC_CMD_GET_PF_COUNT_IN_LEN 0
@@ -4750,6 +4976,8 @@
*/
#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
@@ -4765,6 +4993,8 @@
*/
#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
@@ -4780,6 +5010,8 @@
*/
#define MC_CMD_ALLOC_VIS 0x8b
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_ALLOC_VIS_IN msgrequest */
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
@@ -4804,6 +5036,8 @@
*/
#define MC_CMD_FREE_VIS 0x8c
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FREE_VIS_IN msgrequest */
#define MC_CMD_FREE_VIS_IN_LEN 0
@@ -4817,6 +5051,8 @@
*/
#define MC_CMD_GET_SRIOV_CFG 0xba
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
@@ -4841,6 +5077,8 @@
*/
#define MC_CMD_SET_SRIOV_CFG 0xbb
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
@@ -4870,6 +5108,8 @@
*/
#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
@@ -4889,6 +5129,8 @@
*/
#define MC_CMD_DUMP_VI_STATE 0x8e
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
@@ -4998,6 +5240,8 @@
*/
#define MC_CMD_ALLOC_PIOBUF 0x8f
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
@@ -5013,6 +5257,8 @@
*/
#define MC_CMD_FREE_PIOBUF 0x90
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_FREE_PIOBUF_IN msgrequest */
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
@@ -5028,6 +5274,8 @@
*/
#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
@@ -5062,6 +5310,8 @@
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
@@ -5096,6 +5346,8 @@
*/
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5157,6 +5409,8 @@
*/
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5203,6 +5457,8 @@
*/
#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
* are subtle, and so downloads must proceed in a number of phases.
*
@@ -5318,6 +5574,7 @@
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
@@ -5343,6 +5600,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
@@ -5433,6 +5692,8 @@
*/
#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
@@ -5448,6 +5709,8 @@
*/
#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
@@ -5463,6 +5726,8 @@
*/
#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
@@ -5480,6 +5745,8 @@
*/
#define MC_CMD_TCM_TXQ_INIT 0xb5
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
@@ -5511,6 +5778,8 @@
*/
#define MC_CMD_LINK_PIOBUF 0x92
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_LINK_PIOBUF_IN msgrequest */
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
@@ -5528,6 +5797,8 @@
*/
#define MC_CMD_UNLINK_PIOBUF 0x93
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
@@ -5543,6 +5814,8 @@
*/
#define MC_CMD_VSWITCH_ALLOC 0x94
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
@@ -5572,6 +5845,8 @@
*/
#define MC_CMD_VSWITCH_FREE 0x95
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VSWITCH_FREE_IN msgrequest */
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
@@ -5587,6 +5862,8 @@
*/
#define MC_CMD_VPORT_ALLOC 0x96
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_ALLOC_IN msgrequest */
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
@@ -5636,6 +5913,8 @@
*/
#define MC_CMD_VPORT_FREE 0x97
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_FREE_IN msgrequest */
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
@@ -5651,8 +5930,10 @@
*/
#define MC_CMD_VADAPTOR_ALLOC 0x98
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
-#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* Flags controlling v-adaptor creation */
@@ -5661,6 +5942,19 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
@@ -5672,6 +5966,8 @@
*/
#define MC_CMD_VADAPTOR_FREE 0x99
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
@@ -5682,11 +5978,53 @@
/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
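A minimal sketch of driving the new SET_MAC command from the driver (ether_addr_copy is the generic kernel helper from <linux/etherdevice.h>; the function itself is illustrative, not part of this patch):

static int example_vadaptor_set_mac(struct efx_nic *efx, u32 port_id,
				    const u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}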
+
+/***********************************/
/* MC_CMD_EVB_PORT_ASSIGN
* assign a port to a PCI function.
*/
#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
@@ -5708,6 +6046,8 @@
*/
#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
@@ -5736,6 +6076,8 @@
*/
#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
@@ -5753,6 +6095,8 @@
*/
#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
@@ -5768,6 +6112,8 @@
*/
#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
@@ -5800,6 +6146,8 @@
*/
#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
@@ -5815,6 +6163,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
@@ -5833,6 +6183,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
@@ -5851,6 +6203,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
@@ -5869,6 +6223,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
@@ -5887,6 +6243,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
@@ -5912,6 +6270,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
@@ -5937,6 +6297,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
@@ -5959,6 +6321,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
@@ -5974,6 +6338,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
@@ -5994,6 +6360,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
@@ -6014,6 +6382,8 @@
*/
#define MC_CMD_GET_VECTOR_CFG 0xbf
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
@@ -6033,6 +6403,8 @@
*/
#define MC_CMD_SET_VECTOR_CFG 0xc0
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
@@ -6423,6 +6795,8 @@
*/
#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
@@ -6441,6 +6815,8 @@
*/
#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
@@ -6459,6 +6835,8 @@
*/
#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
@@ -6486,6 +6864,8 @@
*/
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
@@ -6510,6 +6890,8 @@
*/
#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
@@ -6526,6 +6908,8 @@
*/
#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
@@ -6890,6 +7274,8 @@
*/
#define MC_CMD_GET_CLOCK 0xac
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_CLOCK_IN msgrequest */
#define MC_CMD_GET_CLOCK_IN_LEN 0
@@ -6907,6 +7293,8 @@
*/
#define MC_CMD_SET_CLOCK 0xad
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_CLOCK_IN msgrequest */
#define MC_CMD_SET_CLOCK_IN_LEN 12
/* Requested system frequency in MHz; 0 leaves unchanged. */
@@ -6932,6 +7320,8 @@
*/
#define MC_CMD_DPCPU_RPC 0xae
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
@@ -7016,6 +7406,8 @@
*/
#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
/* Interrupt level relative to base for function. */
@@ -7031,6 +7423,8 @@
*/
#define MC_CMD_CAP_BLK_READ 0xe7
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CAP_BLK_READ_IN msgrequest */
#define MC_CMD_CAP_BLK_READ_IN_LEN 12
#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
@@ -7055,6 +7449,8 @@
*/
#define MC_CMD_DUMP_DO 0xe8
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_DO_IN msgrequest */
#define MC_CMD_DUMP_DO_IN_LEN 52
#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
@@ -7108,6 +7504,8 @@
*/
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
@@ -7151,6 +7549,8 @@
*/
#define MC_CMD_SET_PSU 0xea
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PSU_IN msgrequest */
#define MC_CMD_SET_PSU_IN_LEN 12
#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
@@ -7171,6 +7571,8 @@
*/
#define MC_CMD_GET_FUNCTION_INFO 0xec
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
@@ -7188,6 +7590,8 @@
*/
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -7203,6 +7607,8 @@
*/
#define MC_CMD_UART_SEND_DATA 0xee
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
@@ -7231,6 +7637,8 @@
*/
#define MC_CMD_UART_RECV_DATA 0xef
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
/* CRC32 over OFFSET, LENGTH, RESERVED */
@@ -7266,6 +7674,8 @@
*/
#define MC_CMD_READ_FUSES 0xf0
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_FUSES_IN msgrequest */
#define MC_CMD_READ_FUSES_IN_LEN 8
/* Offset in OTP to read */
@@ -7292,6 +7702,8 @@
*/
#define MC_CMD_KR_TUNE 0xf1
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
#define MC_CMD_KR_TUNE_IN_LENMAX 252
@@ -7550,6 +7962,8 @@
*/
#define MC_CMD_PCIE_TUNE 0xf2
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PCIE_TUNE_IN msgrequest */
#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
@@ -7711,6 +8125,8 @@
*/
#define MC_CMD_LICENSING 0xf3
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LICENSING_IN msgrequest */
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
@@ -7756,6 +8172,8 @@
*/
#define MC_CMD_MC2MC_PROXY 0xf4
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MC2MC_PROXY_IN msgrequest */
#define MC_CMD_MC2MC_PROXY_IN_LEN 0
@@ -7771,6 +8189,8 @@
*/
#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
@@ -7792,6 +8212,8 @@
*/
#define MC_CMD_LICENSED_APP_OP 0xf6
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
@@ -7847,6 +8269,8 @@
*/
#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
@@ -7881,6 +8305,8 @@
*/
#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
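Each MCDI command above now carries a privilege-category define alongside its command number. A minimal sketch of how such a define can be consumed at compile time via token pasting; the MC_CMD_PRIVILEGE_CTG() helper below is hypothetical and only illustrates the naming scheme:

/* Hypothetical helper, not part of this patch: resolve the privilege
 * category define for an MCDI command number known at compile time.
 */
#define MC_CMD_PRIVILEGE_CTG(cmd)	MC_CMD_##cmd##_PRIVILEGE_CTG

/* MC_CMD_PRIVILEGE_CTG(0xf7) expands to SRIOV_CTG_ADMIN: only an admin
 * (PF) function may set the port sniff configuration. By contrast,
 * MC_CMD_PRIVILEGE_CTG(0xbf) expands to SRIOV_CTG_GENERAL, so any
 * function, including a VF, may read its interrupt vector config.
 */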
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index fb19b70eac01..7f295c4d7b80 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -865,6 +865,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+ /* This has no effect on EF10 */
ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
efx->net_dev->dev_addr);
@@ -923,6 +924,7 @@ enum efx_stats_action {
static int efx_mcdi_mac_stats(struct efx_nic *efx,
enum efx_stats_action action, int clear)
{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -944,9 +946,14 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
+ NULL, 0, rc);
return rc;
}
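The move from efx_mcdi_rpc() to efx_mcdi_rpc_quiet() here is an idiom used throughout this series: the quiet variant suppresses MCDI's automatic error logging so the caller can whitelist errors that are expected in normal operation and report only the rest. Condensed to its essentials, using the signatures visible in this hunk ("cmd" standing in for the command issued):

rc = efx_mcdi_rpc_quiet(efx, cmd, inbuf, sizeof(inbuf), NULL, 0, NULL);
if (rc == -ENOENT && !atomic_read(&efx->active_queues))
	return rc;	/* expected before any DMA queue exists: stay quiet */
if (rc)			/* everything else is unexpected: report it */
	efx_mcdi_display_error(efx, cmd, sizeof(inbuf), NULL, 0, rc);
return rc;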
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 325dd94bca46..d72f522bf9c3 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -25,6 +25,7 @@
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
@@ -793,7 +794,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
-struct efx_vf;
struct vfdi_status;
/**
@@ -897,7 +897,8 @@ struct vfdi_status;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
- * @filter_lock: Filter table lock
+ * @filter_sem: Filter table rw_semaphore, for freeing the table
+ * @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
@@ -909,7 +910,6 @@ struct vfdi_status;
* completed (either success or failure). Not used when MCDI is used to
* flush receive queues.
* @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
- * @vf: Array of &struct efx_vf objects.
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
@@ -1040,6 +1040,7 @@ struct efx_nic {
void *loopback_selftest;
+ struct rw_semaphore filter_sem;
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
@@ -1053,7 +1054,6 @@ struct efx_nic {
wait_queue_head_t flush_wq;
#ifdef CONFIG_SFC_SRIOV
- struct efx_vf *vf;
unsigned vf_count;
unsigned vf_init_count;
unsigned vi_scale;
@@ -1092,6 +1092,7 @@ struct efx_mtd_partition {
/**
* struct efx_nic_type - Efx device type definition
+ * @mem_bar: Get the memory BAR
* @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
@@ -1204,6 +1205,7 @@ struct efx_mtd_partition {
* @ptp_set_ts_config: Set hardware timestamp configuration. The flags
* and tx_type will already have been validated but this operation
* must validate and update rx_filter.
+ * @set_mac_address: Set the MAC address of the device
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1226,6 +1228,8 @@ struct efx_mtd_partition {
* @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
+ bool is_vf;
+ unsigned int mem_bar;
unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
@@ -1277,7 +1281,8 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
- void (*rx_push_rss_config)(struct efx_nic *efx);
+ int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1330,11 +1335,28 @@ struct efx_nic_type {
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
+ int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
- void (*sriov_mac_address_changed)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
void (*sriov_reset)(struct efx_nic *efx);
+ void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
+ int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
+ int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
+ u8 qos);
+ int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
+ bool spoofchk);
+ int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivi);
+ int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
+ int link_state);
+ int (*sriov_get_phys_port_id)(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+ int (*vswitching_probe)(struct efx_nic *efx);
+ int (*vswitching_restore)(struct efx_nic *efx);
+ void (*vswitching_remove)(struct efx_nic *efx);
+ int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
+ int (*set_mac_address)(struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
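The @filter_sem/@filter_lock pair documented above encodes a two-level locking scheme: the new rw_semaphore guards the existence of the filter table itself, taken for writing only when @filter_state is about to be freed or replaced (e.g. around an MC reboot), while the spinlock continues to serialise ordinary entry updates. A minimal sketch of the intended discipline, assuming the field names shown above (filter_table_remove is an existing efx_nic_type op not visible in this hunk):

/* Reader: any path that dereferences efx->filter_state */
down_read(&efx->filter_sem);
spin_lock_bh(&efx->filter_lock);
/* ... insert, remove or look up filter entries ... */
spin_unlock_bh(&efx->filter_lock);
up_read(&efx->filter_sem);

/* Writer: tearing down or replacing the whole table */
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);	/* frees efx->filter_state */
up_write(&efx->filter_sem);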
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 93d10cbbd1cf..31ff9084d9a4 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -381,6 +381,7 @@ enum {
* @efx: Pointer back to main interface structure
* @wol_filter_id: Wake-on-LAN packet filter id
* @stats: Hardware statistics
+ * @vf: Array of &struct siena_vf objects
* @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
* @vfdi_status: Common VFDI status page to be dmad to VF address space.
* @local_addr_list: List of local addresses. Protected by %local_lock.
@@ -394,6 +395,7 @@ struct siena_nic_data {
int wol_filter_id;
u64 stats[SIENA_STAT_COUNT];
#ifdef CONFIG_SFC_SRIOV
+ struct siena_vf *vf;
struct efx_channel *vfdi_channel;
unsigned vf_buftbl_base;
struct efx_buffer vfdi_status;
@@ -405,59 +407,77 @@ struct siena_nic_data {
};
enum {
- EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
- EF10_STAT_tx_packets,
- EF10_STAT_tx_pause,
- EF10_STAT_tx_control,
- EF10_STAT_tx_unicast,
- EF10_STAT_tx_multicast,
- EF10_STAT_tx_broadcast,
- EF10_STAT_tx_lt64,
- EF10_STAT_tx_64,
- EF10_STAT_tx_65_to_127,
- EF10_STAT_tx_128_to_255,
- EF10_STAT_tx_256_to_511,
- EF10_STAT_tx_512_to_1023,
- EF10_STAT_tx_1024_to_15xx,
- EF10_STAT_tx_15xx_to_jumbo,
- EF10_STAT_rx_bytes,
- EF10_STAT_rx_bytes_minus_good_bytes,
- EF10_STAT_rx_good_bytes,
- EF10_STAT_rx_bad_bytes,
- EF10_STAT_rx_packets,
- EF10_STAT_rx_good,
- EF10_STAT_rx_bad,
- EF10_STAT_rx_pause,
- EF10_STAT_rx_control,
+ EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+ EF10_STAT_port_tx_packets,
+ EF10_STAT_port_tx_pause,
+ EF10_STAT_port_tx_control,
+ EF10_STAT_port_tx_unicast,
+ EF10_STAT_port_tx_multicast,
+ EF10_STAT_port_tx_broadcast,
+ EF10_STAT_port_tx_lt64,
+ EF10_STAT_port_tx_64,
+ EF10_STAT_port_tx_65_to_127,
+ EF10_STAT_port_tx_128_to_255,
+ EF10_STAT_port_tx_256_to_511,
+ EF10_STAT_port_tx_512_to_1023,
+ EF10_STAT_port_tx_1024_to_15xx,
+ EF10_STAT_port_tx_15xx_to_jumbo,
+ EF10_STAT_port_rx_bytes,
+ EF10_STAT_port_rx_bytes_minus_good_bytes,
+ EF10_STAT_port_rx_good_bytes,
+ EF10_STAT_port_rx_bad_bytes,
+ EF10_STAT_port_rx_packets,
+ EF10_STAT_port_rx_good,
+ EF10_STAT_port_rx_bad,
+ EF10_STAT_port_rx_pause,
+ EF10_STAT_port_rx_control,
+ EF10_STAT_port_rx_unicast,
+ EF10_STAT_port_rx_multicast,
+ EF10_STAT_port_rx_broadcast,
+ EF10_STAT_port_rx_lt64,
+ EF10_STAT_port_rx_64,
+ EF10_STAT_port_rx_65_to_127,
+ EF10_STAT_port_rx_128_to_255,
+ EF10_STAT_port_rx_256_to_511,
+ EF10_STAT_port_rx_512_to_1023,
+ EF10_STAT_port_rx_1024_to_15xx,
+ EF10_STAT_port_rx_15xx_to_jumbo,
+ EF10_STAT_port_rx_gtjumbo,
+ EF10_STAT_port_rx_bad_gtjumbo,
+ EF10_STAT_port_rx_overflow,
+ EF10_STAT_port_rx_align_error,
+ EF10_STAT_port_rx_length_error,
+ EF10_STAT_port_rx_nodesc_drops,
+ EF10_STAT_port_rx_pm_trunc_bb_overflow,
+ EF10_STAT_port_rx_pm_discard_bb_overflow,
+ EF10_STAT_port_rx_pm_trunc_vfifo_full,
+ EF10_STAT_port_rx_pm_discard_vfifo_full,
+ EF10_STAT_port_rx_pm_trunc_qbb,
+ EF10_STAT_port_rx_pm_discard_qbb,
+ EF10_STAT_port_rx_pm_discard_mapping,
+ EF10_STAT_port_rx_dp_q_disabled_packets,
+ EF10_STAT_port_rx_dp_di_dropped_packets,
+ EF10_STAT_port_rx_dp_streaming_packets,
+ EF10_STAT_port_rx_dp_hlb_fetch,
+ EF10_STAT_port_rx_dp_hlb_wait,
EF10_STAT_rx_unicast,
+ EF10_STAT_rx_unicast_bytes,
EF10_STAT_rx_multicast,
+ EF10_STAT_rx_multicast_bytes,
EF10_STAT_rx_broadcast,
- EF10_STAT_rx_lt64,
- EF10_STAT_rx_64,
- EF10_STAT_rx_65_to_127,
- EF10_STAT_rx_128_to_255,
- EF10_STAT_rx_256_to_511,
- EF10_STAT_rx_512_to_1023,
- EF10_STAT_rx_1024_to_15xx,
- EF10_STAT_rx_15xx_to_jumbo,
- EF10_STAT_rx_gtjumbo,
- EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_broadcast_bytes,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_bad_bytes,
EF10_STAT_rx_overflow,
- EF10_STAT_rx_align_error,
- EF10_STAT_rx_length_error,
- EF10_STAT_rx_nodesc_drops,
- EF10_STAT_rx_pm_trunc_bb_overflow,
- EF10_STAT_rx_pm_discard_bb_overflow,
- EF10_STAT_rx_pm_trunc_vfifo_full,
- EF10_STAT_rx_pm_discard_vfifo_full,
- EF10_STAT_rx_pm_trunc_qbb,
- EF10_STAT_rx_pm_discard_qbb,
- EF10_STAT_rx_pm_discard_mapping,
- EF10_STAT_rx_dp_q_disabled_packets,
- EF10_STAT_rx_dp_di_dropped_packets,
- EF10_STAT_rx_dp_streaming_packets,
- EF10_STAT_rx_dp_hlb_fetch,
- EF10_STAT_rx_dp_hlb_wait,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_unicast_bytes,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_multicast_bytes,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_broadcast_bytes,
+ EF10_STAT_tx_bad,
+ EF10_STAT_tx_bad_bytes,
+ EF10_STAT_tx_overflow,
EF10_STAT_COUNT
};
@@ -483,12 +503,21 @@ enum {
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
* @rx_rss_context: Firmware handle for our RSS context
+ * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
+ * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
+ * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
+ * @vport_id: The function's vport ID, only relevant for PFs
+ * @must_probe_vswitching: Flag: vswitching has yet to be set up after MC reboot
+ * @pf_index: The number for this PF, or the parent PF if this is a VF
+#ifdef CONFIG_SFC_SRIOV
+ * @vf: Pointer to VF data structure
+#endif
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
@@ -503,126 +532,27 @@ struct efx_ef10_nic_data {
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
bool must_restore_piobufs;
u32 rx_rss_context;
+ bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool must_check_datapath_caps;
u32 datapath_caps;
-};
-
-/*
- * On the SFC9000 family each port is associated with 1 PCI physical
- * function (PF) handled by sfc and a configurable number of virtual
- * functions (VFs) that may be handled by some other driver, often in
- * a VM guest. The queue pointer registers are mapped in both PF and
- * VF BARs such that an 8K region provides access to a single RX, TX
- * and event queue (collectively a Virtual Interface, VI or VNIC).
- *
- * The PF has access to all 1024 VIs while VFs are mapped to VIs
- * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
- * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
- * The number of VIs and the VI_SCALE value are configurable but must
- * be established at boot time by firmware.
- */
-
-/* Maximum VI_SCALE parameter supported by Siena */
-#define EFX_VI_SCALE_MAX 6
-/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
- * so this is the smallest allowed value. */
-#define EFX_VI_BASE 128U
-/* Maximum number of VFs allowed */
-#define EFX_VF_COUNT_MAX 127
-/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
-#define EFX_MAX_VF_EVQ_SIZE 8192UL
-/* The number of buffer table entries reserved for each VI on a VF */
-#define EFX_VF_BUFTBL_PER_VI \
- ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
- sizeof(efx_qword_t) / EFX_BUF_SIZE)
-
+ unsigned int rx_dpcpu_fw_id;
+ unsigned int tx_dpcpu_fw_id;
+ unsigned int vport_id;
+ bool must_probe_vswitching;
+ unsigned int pf_index;
+ u8 port_id[ETH_ALEN];
#ifdef CONFIG_SFC_SRIOV
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
-{
- return efx->vf_count != 0;
-}
-
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
-{
- return efx->vf_init_count != 0;
-}
-
-static inline unsigned int efx_vf_size(struct efx_nic *efx)
-{
- return 1 << efx->vi_scale;
-}
+ unsigned int vf_index;
+ struct ef10_vf *vf;
+#endif
+ u8 vport_mac[ETH_ALEN];
+};
int efx_init_sriov(void);
-void efx_siena_sriov_probe(struct efx_nic *efx);
-int efx_siena_sriov_init(struct efx_nic *efx);
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
-void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
-void efx_siena_sriov_reset(struct efx_nic *efx);
-void efx_siena_sriov_fini(struct efx_nic *efx);
void efx_fini_sriov(void);
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#else
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
-static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
-static inline int efx_init_sriov(void) { return 0; }
-static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
-static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_event(struct efx_channel *channel,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
- unsigned dmaq) {}
-static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
-static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
-static inline void efx_fini_sriov(void) {}
-
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#endif
-
-/* FALCON */
-static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
-
-int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
- u16 vlan, u8 qos);
-int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
- struct ifla_vf_info *ivf);
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
- bool spoofchk);
-
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
@@ -654,6 +584,7 @@ extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
/**************************************************************************
*
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index a2e9aee05cdd..ad62615a93dc 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -306,7 +306,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+ _MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
unsigned int good_syncs;
unsigned int fast_syncs;
@@ -389,11 +389,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+ if (rc)
memset(outbuf, 0, sizeof(outbuf));
- }
efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
efx_ptp_stat_mask,
stats, _MCDI_PTR(outbuf, 0), false);
@@ -490,14 +487,20 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
*/
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &out_len);
- if (rc == 0)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0) {
fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
- else if (rc == -EINVAL)
+ } else if (rc == -EINVAL) {
fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
- else
+ } else if (rc == -EPERM) {
+ netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ return rc;
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+ outbuf, sizeof(outbuf), rc);
return rc;
+ }
if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
@@ -541,8 +544,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
if (rc == 0) {
efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
@@ -558,6 +561,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
efx->ptp_data->ts_corrections.pps_out = 0;
efx->ptp_data->ts_corrections.pps_in = 0;
} else {
+ efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
+ sizeof(outbuf), rc);
return rc;
}
@@ -568,7 +573,7 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
static int efx_ptp_enable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
@@ -596,7 +601,7 @@ static int efx_ptp_enable(struct efx_nic *efx)
static int efx_ptp_disable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
@@ -604,7 +609,12 @@ static int efx_ptp_disable(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
rc = (rc == -EALREADY) ? 0 : rc;
- if (rc)
+ /* ENOSYS means the NIC has no PTP support; EPERM means this function
+ * is not permitted to use PTP. Either way this should only happen
+ * during probe, so just note the lack of support.
+ */
+ if (rc == -ENOSYS || rc == -EPERM)
+ netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ else if (rc)
efx_mcdi_display_error(efx, MC_CMD_PTP,
MC_CMD_PTP_IN_DISABLE_LEN,
outbuf, sizeof(outbuf), rc);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index f12c811938d2..b323b9167526 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,6 +25,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "selftest.h"
+#include "siena_sriov.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
@@ -306,7 +307,9 @@ static int siena_probe_nic(struct efx_nic *efx)
if (rc)
goto fail5;
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_probe(efx);
+#endif
efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -321,7 +324,8 @@ fail1:
return rc;
}
-static void siena_rx_push_rss_config(struct efx_nic *efx)
+static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
{
efx_oword_t temp;
@@ -343,7 +347,11 @@ static void siena_rx_push_rss_config(struct efx_nic *efx)
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
+
+ return 0;
}
/* This call performs hardware-specific global initialisation, such as
@@ -386,7 +394,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- siena_rx_push_rss_config(efx);
+ siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -909,6 +917,8 @@ fail:
*/
const struct efx_nic_type siena_a0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
@@ -996,11 +1006,22 @@ const struct efx_nic_type siena_a0_nic_type = {
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
.ptp_set_ts_config = siena_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_siena_sriov_configure,
.sriov_init = efx_siena_sriov_init,
.sriov_fini = efx_siena_sriov_fini,
- .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
.sriov_wanted = efx_siena_sriov_wanted,
.sriov_reset = efx_siena_sriov_reset,
+ .sriov_flr = efx_siena_sriov_flr,
+ .sriov_set_vf_mac = efx_siena_sriov_set_vf_mac,
+ .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
+ .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
+ .sriov_get_vf_config = efx_siena_sriov_get_vf_config,
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+ .set_mac_address = efx_siena_sriov_mac_address_changed,
+#endif
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index fe83430796fd..da7b94f34604 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -16,6 +16,7 @@
#include "filter.h"
#include "mcdi_pcol.h"
#include "farch_regs.h"
+#include "siena_sriov.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
@@ -38,7 +39,7 @@ enum efx_vf_tx_filter_mode {
};
/**
- * struct efx_vf - Back-end resource and protocol state for a PCI VF
+ * struct siena_vf - Back-end resource and protocol state for a PCI VF
* @efx: The Efx NIC owning this VF
* @pci_rid: The PCI requester ID for this VF
* @pci_name: The PCI name (formatted address) of this VF
@@ -83,7 +84,7 @@ enum efx_vf_tx_filter_mode {
* @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
* @reset_work: Work item to schedule a VF reset.
*/
-struct efx_vf {
+struct siena_vf {
struct efx_nic *efx;
unsigned int pci_rid;
char pci_name[13]; /* dddd:bb:dd.f */
@@ -189,7 +190,7 @@ MODULE_PARM_DESC(max_vfs,
*/
static struct workqueue_struct *vfdi_workqueue;
-static unsigned abs_index(struct efx_vf *vf, unsigned index)
+static unsigned abs_index(struct siena_vf *vf, unsigned index)
{
return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
@@ -207,8 +208,8 @@ static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
- rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
- outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
+ outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_SRIOV_OUT_LEN)
@@ -299,7 +300,7 @@ out:
/* The TX filter is entirely controlled by this driver, and is modified
* underneath the feet of the VF
*/
-static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -343,7 +344,7 @@ static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
}
/* The RX filter is managed here on behalf of the VF driver */
-static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -382,7 +383,7 @@ static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
}
}
-static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -397,7 +398,7 @@ static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
* local_page_list, either by acquiring local_lock or by running from
* efx_siena_sriov_peer_work()
*/
-static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -509,8 +510,9 @@ static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
* Optionally set VF index and VI index within the VF.
*/
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
- struct efx_vf **vf_out, unsigned *rel_index_out)
+ struct siena_vf **vf_out, unsigned *rel_index_out)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned vf_i;
if (abs_index < EFX_VI_BASE)
@@ -520,13 +522,13 @@ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
return true;
if (vf_out)
- *vf_out = efx->vf + vf_i;
+ *vf_out = nic_data->vf + vf_i;
if (rel_index_out)
*rel_index_out = abs_index % efx_vf_size(efx);
return false;
}
-static int efx_vfdi_init_evq(struct efx_vf *vf)
+static int efx_vfdi_init_evq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -567,7 +569,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_rxq(struct efx_vf *vf)
+static int efx_vfdi_init_rxq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -608,7 +610,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_txq(struct efx_vf *vf)
+static int efx_vfdi_init_txq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -655,7 +657,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf)
}
/* Returns true when efx_vfdi_fini_all_queues should wake */
-static bool efx_vfdi_flush_wake(struct efx_vf *vf)
+static bool efx_vfdi_flush_wake(struct siena_vf *vf)
{
/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
smp_mb();
@@ -664,7 +666,7 @@ static bool efx_vfdi_flush_wake(struct efx_vf *vf)
atomic_read(&vf->rxq_retry_count);
}
-static void efx_vfdi_flush_clear(struct efx_vf *vf)
+static void efx_vfdi_flush_clear(struct siena_vf *vf)
{
memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
vf->txq_count = 0;
@@ -674,7 +676,7 @@ static void efx_vfdi_flush_clear(struct efx_vf *vf)
atomic_set(&vf->rxq_retry_count, 0);
}
-static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
+static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
efx_oword_t reg;
@@ -757,7 +759,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
-static int efx_vfdi_insert_filter(struct efx_vf *vf)
+static int efx_vfdi_insert_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -789,7 +791,7 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
+static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -801,7 +803,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_set_status_page(struct efx_vf *vf)
+static int efx_vfdi_set_status_page(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -846,7 +848,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_clear_status_page(struct efx_vf *vf)
+static int efx_vfdi_clear_status_page(struct siena_vf *vf)
{
mutex_lock(&vf->status_lock);
vf->status_addr = 0;
@@ -855,7 +857,7 @@ static int efx_vfdi_clear_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
+typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
@@ -870,7 +872,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
static void efx_siena_sriov_vfdi(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
struct efx_memcpy_req copy[2];
@@ -936,7 +938,8 @@ static void efx_siena_sriov_vfdi(struct work_struct *work)
* event ring in guest memory with VFDI reset events, then (re-initialise) the
* event queue to raise an interrupt. The guest driver will then recover.
*/
-static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+
+static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
struct efx_buffer *buffer)
{
struct efx_nic *efx = vf->efx;
@@ -1006,7 +1009,7 @@ static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
@@ -1055,8 +1058,10 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
if (!max_vfs)
return;
- if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
+ if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
+ netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
return;
+ }
if (count > 0 && count > max_vfs)
count = max_vfs;
@@ -1077,7 +1082,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_work);
struct efx_nic *efx = nic_data->efx;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
- struct efx_vf *vf;
+ struct siena_vf *vf;
struct efx_local_addr *local_addr;
struct vfdi_endpoint *peer;
struct efx_endpoint_page *epp;
@@ -1099,7 +1104,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
peer_count = 1;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
@@ -1155,7 +1160,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
/* Finally, push the pages */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->status_addr)
@@ -1190,14 +1195,16 @@ static void efx_siena_sriov_free_local(struct efx_nic *efx)
static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
{
unsigned index;
- struct efx_vf *vf;
+ struct siena_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
- efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
- if (!efx->vf)
+ nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
+ GFP_KERNEL);
+ if (!nic_data->vf)
return -ENOMEM;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
vf->efx = efx;
vf->index = index;
@@ -1216,11 +1223,12 @@ static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
unsigned int pos;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
efx_nic_free_buffer(efx, &vf->buf);
kfree(vf->peer_page_addrs);
@@ -1237,7 +1245,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
struct siena_nic_data *nic_data = efx->nic_data;
unsigned index, devfn, sriov, buftbl_base;
u16 offset, stride;
- struct efx_vf *vf;
+ struct siena_vf *vf;
int rc;
sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
@@ -1250,7 +1258,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
buftbl_base = nic_data->vf_buftbl_base;
devfn = pci_dev->devfn + offset;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
/* Reserve buffer entries */
vf->buftbl_base = buftbl_base;
@@ -1350,7 +1358,7 @@ fail_pci:
fail_vfs:
cancel_work_sync(&nic_data->peer_work);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
fail_alloc:
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
@@ -1361,7 +1369,7 @@ fail_cmd:
void efx_siena_sriov_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int pos;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -1377,7 +1385,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Flush all reconfiguration work */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
cancel_work_sync(&vf->req);
cancel_work_sync(&vf->reset_work);
}
@@ -1388,7 +1396,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Tear down back-end state */
efx_siena_sriov_vfs_fini(efx);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
efx_siena_sriov_cmd(efx, false, NULL, NULL);
}
@@ -1396,7 +1404,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned qid, seq, type, data;
qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
@@ -1452,11 +1460,12 @@ error:
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i > efx->vf_init_count)
return;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
netif_info(efx, hw, efx->net_dev,
"FLR on VF %s\n", vf->pci_name);
@@ -1467,21 +1476,23 @@ void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
vf->evq0_count = 0;
}
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
if (!efx->vf_init_count)
- return;
+ return 0;
ether_addr_copy(vfdi_status->peers[0].mac_addr,
efx->net_dev->dev_addr);
queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+ return 0;
}
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
@@ -1500,7 +1511,7 @@ void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned ev_failed, queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
@@ -1525,7 +1536,7 @@ void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
/* Called from napi. Schedule the reset work item */
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int rel;
if (map_vi_index(efx, dmaq, &vf, &rel))
@@ -1541,9 +1552,10 @@ void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
/* Reset all VFs */
void efx_siena_sriov_reset(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned int vf_i;
struct efx_buffer buf;
- struct efx_vf *vf;
+ struct siena_vf *vf;
ASSERT_RTNL();
@@ -1557,7 +1569,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
efx_siena_sriov_reset_vf(vf, &buf);
}
@@ -1573,7 +1585,6 @@ int efx_init_sriov(void)
vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
if (!vfdi_workqueue)
return -ENOMEM;
-
return 0;
}
@@ -1582,14 +1593,14 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
-int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
ether_addr_copy(vf->addr.mac_addr, mac);
@@ -1599,16 +1610,16 @@ int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
return 0;
}
-int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
u16 vlan, u8 qos)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
@@ -1619,16 +1630,16 @@ int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
return 0;
}
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
bool spoofchk)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
int rc;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->txq_lock);
if (vf->txq_count == 0) {
@@ -1643,16 +1654,16 @@ int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
return rc;
}
-int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
struct ifla_vf_info *ivi)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
ivi->vf = vf_i;
ether_addr_copy(ivi->mac, vf->addr.mac_addr);
@@ -1666,3 +1677,12 @@ int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
return 0;
}
+bool efx_siena_sriov_wanted(struct efx_nic *efx)
+{
+ return efx->vf_count != 0;
+}
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ return 0;
+}
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h
new file mode 100644
index 000000000000..d88d4dab170a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena_sriov.h
@@ -0,0 +1,79 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef SIENA_SRIOV_H
+#define SIENA_SRIOV_H
+
+#include "net_driver.h"
+
+/* On the SFC9000 family each port is associated with 1 PCI physical
+ * function (PF) handled by sfc and a configurable number of virtual
+ * functions (VFs) that may be handled by some other driver, often in
+ * a VM guest. The queue pointer registers are mapped in both PF and
+ * VF BARs such that an 8K region provides access to a single RX, TX
+ * and event queue (collectively a Virtual Interface, VI or VNIC).
+ *
+ * The PF has access to all 1024 VIs while VFs are mapped to VIs
+ * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
+ * in the range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
+ * The number of VIs and the VI_SCALE value are configurable but must
+ * be established at boot time by firmware.
+ */
+
+/* Maximum VI_SCALE parameter supported by Siena */
+#define EFX_VI_SCALE_MAX 6
+/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
+ * so this is the smallest allowed value.
+ */
+#define EFX_VI_BASE 128U
+/* Maximum number of VFs allowed */
+#define EFX_VF_COUNT_MAX 127
+/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
+#define EFX_MAX_VF_EVQ_SIZE 8192UL
+/* The number of buffer table entries reserved for each VI on a VF */
+#define EFX_VF_BUFTBL_PER_VI \
+ ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
+ sizeof(efx_qword_t) / EFX_BUF_SIZE)
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+bool efx_siena_sriov_wanted(struct efx_nic *efx);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
+ u16 vlan, u8 qos);
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+ bool spoofchk);
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf,
+ struct ifla_vf_info *ivf);
+
+#ifdef CONFIG_SFC_SRIOV
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+ return efx->vf_init_count != 0;
+}
+#else /* !CONFIG_SFC_SRIOV */
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+ return false;
+}
+#endif /* CONFIG_SFC_SRIOV */
+
+void efx_siena_sriov_probe(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+
+#endif /* SIENA_SRIOV_H */
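The VI_BASE/VI_SCALE mapping described in the comment block above is exactly what abs_index() and map_vi_index() in siena_sriov.c compute. A standalone sketch of the forward and reverse mappings, assuming vi_scale has already been fixed by firmware:

/* First absolute VI owned by VF vf_i; each VF owns (1 << vi_scale) VIs */
static unsigned int vf_first_vi(unsigned int vf_i, unsigned int vi_scale)
{
	return EFX_VI_BASE + (vf_i << vi_scale);
}

/* Reverse mapping: absolute VI -> (VF index, VI index within that VF).
 * The mask form relies on EFX_VI_BASE being aligned to
 * (1 << EFX_VI_SCALE_MAX), as required above.
 */
static void vi_to_vf(unsigned int abs_vi, unsigned int vi_scale,
		     unsigned int *vf_i, unsigned int *rel_vi)
{
	*vf_i = (abs_vi - EFX_VI_BASE) >> vi_scale;
	*rel_vi = abs_vi & ((1U << vi_scale) - 1);
}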
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
new file mode 100644
index 000000000000..816c44689e67
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -0,0 +1,83 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "sriov.h"
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_mac)
+ return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+ u8 qos)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_vlan) {
+ if ((vlan & ~VLAN_VID_MASK) ||
+ (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
+ return -EINVAL;
+
+ return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_spoofchk)
+ return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_vf_config)
+ return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+ int link_state)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_link_state)
+ return efx->type->sriov_set_vf_link_state(efx, vf_i,
+ link_state);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_phys_port_id)
+ return efx->type->sriov_get_phys_port_id(efx, ppid);
+ else
+ return -EOPNOTSUPP;
+}
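These wrappers let the ndo callbacks stay NIC-generation agnostic: each one forwards to the corresponding efx_nic_type op when the hardware supplies it and returns -EOPNOTSUPP otherwise. A sketch of how they would be wired into the driver's net_device_ops, assuming the kernel's standard ndo hook names (the full ops table in efx.c is abbreviated here):

static const struct net_device_ops efx_netdev_ops = {
	/* ... standard hooks elided ... */
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
	.ndo_set_vf_link_state	= efx_sriov_set_vf_link_state,
	.ndo_get_phys_port_id	= efx_sriov_get_phys_port_id,
#endif
};

From userspace these are reached through iproute2, e.g. "ip link set eth0 vf 0 mac 02:00:00:00:00:01".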
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
new file mode 100644
index 000000000000..400df526586d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -0,0 +1,31 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SRIOV_H
+#define EFX_SRIOV_H
+
+#include "net_driver.h"
+
+#ifdef CONFIG_SFC_SRIOV
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+ u8 qos);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk);
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi);
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+ int link_state);
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid);
+
+#endif /* CONFIG_SFC_SRIOV */
+
+#endif /* EFX_SRIOV_H */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 7d3af190be55..cec147d1d34f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -16,6 +16,7 @@ if STMMAC_ETH
config STMMAC_PLATFORM
tristate "STMMAC Platform bus support"
depends on STMMAC_ETH
+ select MFD_SYSCON
default y
---help---
This selects the platform specific bus support for the stmmac driver.
@@ -26,6 +27,95 @@ config STMMAC_PLATFORM
If unsure, say N.
+if STMMAC_PLATFORM
+
+config DWMAC_GENERIC
+ tristate "Generic driver for DWMAC"
+ default STMMAC_PLATFORM
+ ---help---
+ Generic DWMAC driver for platforms that don't require any
+ platform-specific code to function, or that use platform
+ data for setup.
+
+config DWMAC_IPQ806X
+ tristate "QCA IPQ806x DWMAC support"
+ default ARCH_QCOM
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for QCA IPQ806X DWMAC Ethernet.
+
+ This selects the IPQ806x SoC glue layer support for the stmmac
+ device driver. This driver does not use any of the hardware
+ acceleration features available on this SoC. Network devices
+ will behave like standard non-accelerated ethernet interfaces.
+
+config DWMAC_LPC18XX
+ tristate "NXP LPC18xx/43xx DWMAC support"
+ default ARCH_LPC18XX
+ depends on OF
+ select MFD_SYSCON
+ ---help---
+ Support for NXP LPC18xx/43xx DWMAC Ethernet.
+
+config DWMAC_MESON
+ tristate "Amlogic Meson dwmac support"
+ default ARCH_MESON
+ depends on OF
+ help
+ Support for Ethernet controller on Amlogic Meson SoCs.
+
+ This selects the Amlogic Meson SoC glue layer support for
+ the stmmac device driver. This driver is used for Meson6 and
+ Meson8 SoCs.
+
+config DWMAC_ROCKCHIP
+ tristate "Rockchip dwmac support"
+ default ARCH_ROCKCHIP
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for Ethernet controller on Rockchip RK3288 SoC.
+
+ This selects the Rockchip RK3288 SoC glue layer support for
+ the stmmac device driver.
+
+config DWMAC_SOCFPGA
+ tristate "SOCFPGA dwmac support"
+ default ARCH_SOCFPGA
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for the Ethernet controller on Altera SOCFPGA SoCs.
+
+ This selects the Altera SOCFPGA SoC glue layer support
+ for the stmmac device driver. This driver is used for
+ the Arria V and Cyclone V FPGA SoCs.
+
+config DWMAC_STI
+ tristate "STi GMAC support"
+ default ARCH_STI
+ depends on OF
+ select MFD_SYSCON
+ ---help---
+ Support for the Ethernet controller on STi SoCs.
+
+ This selects the STi SoC glue layer support for the stmmac
+ device driver. This driver is used for the GMAC Ethernet
+ controller on the STi series of SoCs.
+
+config DWMAC_SUNXI
+ tristate "Allwinner GMAC support"
+ default ARCH_SUNXI
+ depends on OF
+ ---help---
+ Support for the Allwinner A20/A31 GMAC Ethernet controllers.
+
+ This selects the Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for the A20/A31
+ GMAC Ethernet controllers.
+endif
+
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 73c2715a27f3..b3901616f4f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -4,9 +4,17 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
-obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
-stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \
- dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
+# Ordering matters. Generic driver must be last.
+obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
+obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
+obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
+obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
+obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
+obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
+obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
+stmmac-platform-objs:= stmmac_platform.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
new file mode 100644
index 000000000000..e817a1a44379
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -0,0 +1,41 @@
+/*
+ * Generic DWMAC platform driver
+ *
+ * Copyright (C) 2007-2011 STMicroelectronics Ltd
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+static const struct of_device_id dwmac_generic_match[] = {
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.610"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, dwmac_generic_match);
+
+static struct platform_driver dwmac_generic_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(dwmac_generic_match),
+ },
+};
+module_platform_driver(dwmac_generic_driver);
+
+MODULE_DESCRIPTION("Generic dwmac driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
new file mode 100644
index 000000000000..7e3129e7f143
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -0,0 +1,365 @@
+/*
+ * Qualcomm Atheros IPQ806x GMAC glue layer
+ *
+ * Copyright (C) 2015 The Linux Foundation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+
+#include "stmmac_platform.h"
+
+#define NSS_COMMON_CLK_GATE 0x8
+#define NSS_COMMON_CLK_GATE_PTP_EN(x) BIT(0x10 + x)
+#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x) BIT(0x9 + (x * 2))
+#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x) BIT(0x8 + (x * 2))
+#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x) BIT(0x4 + x)
+#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x) BIT(0x0 + x)
+
+#define NSS_COMMON_CLK_DIV0 0xC
+#define NSS_COMMON_CLK_DIV_OFFSET(x) (x * 8)
+#define NSS_COMMON_CLK_DIV_MASK 0x7f
+
+#define NSS_COMMON_CLK_SRC_CTRL 0x14
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
+/* Mode is coded on 1 bit but is different depending on the MAC ID:
+ * MAC0: QSGMII=0 RGMII=1
+ * MAC1: QSGMII=0 SGMII=0 RGMII=1
+ * MAC2 & MAC3: QSGMII=0 SGMII=1
+ */
+#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1
+#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 1 : 0)
+
+#define NSS_COMMON_MACSEC_CTL 0x28
+#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x)
+
+#define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4))
+#define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19)
+#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16)
+#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8
+#define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0
+#define NSS_COMMON_GMAC_CTL_IFG_MASK 0x3f
+
+#define NSS_COMMON_CLK_DIV_RGMII_1000 1
+#define NSS_COMMON_CLK_DIV_RGMII_100 9
+#define NSS_COMMON_CLK_DIV_RGMII_10 99
+#define NSS_COMMON_CLK_DIV_SGMII_1000 0
+#define NSS_COMMON_CLK_DIV_SGMII_100 4
+#define NSS_COMMON_CLK_DIV_SGMII_10 49
+
+#define QSGMII_PCS_MODE_CTL 0x68
+#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x) BIT((x * 8) + 7)
+
+#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
+#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
+
+/* Only GMAC1/2/3 support SGMII and their CTL register are not contiguous */
+#define QSGMII_PHY_SGMII_CTL(x) ((x == 1) ? 0x134 : \
+ (0x13c + (4 * (x - 2))))
+#define QSGMII_PHY_CDR_EN BIT(0)
+#define QSGMII_PHY_RX_FRONT_EN BIT(1)
+#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2)
+#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
+#define QSGMII_PHY_QSGMII_EN BIT(7)
+#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK 0x7
+#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18
+#define QSGMII_PHY_RX_DC_BIAS_MASK 0x3
+#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20
+#define QSGMII_PHY_RX_INPUT_EQU_MASK 0x3
+#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22
+#define QSGMII_PHY_CDR_PI_SLEW_MASK 0x3
+#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28
+#define QSGMII_PHY_TX_DRV_AMP_MASK 0xf
+
+struct ipq806x_gmac {
+ struct platform_device *pdev;
+ struct regmap *nss_common;
+ struct regmap *qsgmii_csr;
+ uint32_t id;
+ struct clk *core_clk;
+ phy_interface_t phy_mode;
+};
+
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_SGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_SGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_SGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_RGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_RGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_RGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ uint32_t clk_bits, val;
+ int div;
+
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ div = get_clk_div_rgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ div = get_clk_div_sgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+ break;
+
+ default:
+ dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return -EINVAL;
+ }
+
+ /* Disable the clocks */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val &= ~clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ /* Set the divider */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val);
+ val &= ~(NSS_COMMON_CLK_DIV_MASK
+ << NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
+ val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val);
+
+ /* Enable the clock back */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ return 0;
+}
+
+static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+{
+ struct device *dev = &gmac->pdev->dev;
+
+ gmac->phy_mode = of_get_phy_mode(dev->of_node);
+ if (gmac->phy_mode < 0) {
+ dev_err(dev, "missing phy mode property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
+ dev_err(dev, "missing qcom id property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The GMACs are called 1 to 4 in the documentation, but to simplify the
+ * code and keep it consistent with the Linux convention, we'll number
+ * them from 0 to 3 here.
+ */
+ if (gmac->id > 3) {
+ dev_err(dev, "invalid gmac id\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ gmac->core_clk = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(gmac->core_clk)) {
+ dev_err(dev, "missing stmmaceth clk property\n");
+ return gmac->core_clk;
+ }
+ clk_set_rate(gmac->core_clk, 266000000);
+
+ /* Setup the register map for the nss common registers */
+ gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,nss-common");
+ if (IS_ERR(gmac->nss_common)) {
+ dev_err(dev, "missing nss-common node\n");
+ return gmac->nss_common;
+ }
+
+ /* Setup the register map for the qsgmii csr registers */
+ gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,qsgmii-csr");
+ if (IS_ERR(gmac->qsgmii_csr)) {
+ dev_err(dev, "missing qsgmii-csr node\n");
+ return gmac->qsgmii_csr;
+ }
+
+ return NULL;
+}
+
+static void *ipq806x_gmac_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipq806x_gmac *gmac;
+ int val;
+ void *err;
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return ERR_PTR(-ENOMEM);
+
+ gmac->pdev = pdev;
+
+ err = ipq806x_gmac_of_parse(gmac);
+ if (err) {
+ dev_err(dev, "device tree parsing error\n");
+ return err;
+ }
+
+ regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
+ QSGMII_PCS_CAL_LCKDT_CTL_RST);
+
+ /* Inter frame gap is set to 12 */
+ val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET |
+ 12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET;
+ /* We also initiate an AXI low power exit request */
+ val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return ERR_PTR(-EINVAL);
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
+
+ /* Configure the clock src according to the mode */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
+ val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return ERR_PTR(-EINVAL);
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
+
+ /* Enable PTP clock */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+ QSGMII_PHY_CDR_EN |
+ QSGMII_PHY_RX_FRONT_EN |
+ QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+ QSGMII_PHY_TX_DRIVER_EN |
+ QSGMII_PHY_QSGMII_EN |
+ 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+ 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+ 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+ 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+ 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+ }
+
+ return gmac;
+}
+
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct ipq806x_gmac *gmac = priv;
+
+ ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static const struct stmmac_of_data ipq806x_gmac_data = {
+ .has_gmac = 1,
+ .setup = ipq806x_gmac_setup,
+ .fix_mac_speed = ipq806x_gmac_fix_mac_speed,
+};
+
+static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
+ { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
+
+static struct platform_driver ipq806x_gmac_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "ipq806x-gmac-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = ipq806x_gmac_dwmac_match,
+ },
+};
+module_platform_driver(ipq806x_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Mathieu Olivari <mathieu@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
new file mode 100644
index 000000000000..cb888d3ebbdc
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -0,0 +1,99 @@
+/*
+ * DWMAC glue for NXP LPC18xx/LPC43xx Ethernet
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+/* Register defines for CREG syscon */
+#define LPC18XX_CREG_CREG6 0x12c
+# define LPC18XX_CREG_CREG6_ETHMODE_MASK 0x7
+# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0
+# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4
+
+struct lpc18xx_dwmac_priv_data {
+ struct regmap *reg;
+ int interface;
+};
+
+static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
+{
+ struct lpc18xx_dwmac_priv_data *dwmac;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return ERR_PTR(-ENOMEM);
+
+ dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
+ if (dwmac->interface < 0)
+ return ERR_PTR(dwmac->interface);
+
+ dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(dwmac->reg)) {
+ dev_err(&pdev->dev, "Syscon lookup failed\n");
+ return dwmac->reg;
+ }
+
+ return dwmac;
+}
+
+static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct lpc18xx_dwmac_priv_data *dwmac = priv;
+ u8 ethmode;
+
+ if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+ ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
+ } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
+ } else {
+ dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+ LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
+
+ return 0;
+}
+
+static const struct stmmac_of_data lpc18xx_dwmac_data = {
+ .has_gmac = 1,
+ .setup = lpc18xx_dwmac_setup,
+ .init = lpc18xx_dwmac_init,
+};
+
+static const struct of_device_id lpc18xx_dwmac_match[] = {
+ { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
+
+static struct platform_driver lpc18xx_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "lpc18xx-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = lpc18xx_dwmac_match,
+ },
+};
+module_platform_driver(lpc18xx_dwmac_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("DWMAC glue for LPC18xx/43xx Ethernet");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index cca028d632f6..61a324a87d09 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -15,6 +15,7 @@
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>
@@ -63,7 +64,28 @@ static void *meson6_dwmac_setup(struct platform_device *pdev)
return dwmac;
}
-const struct stmmac_of_data meson6_dwmac_data = {
+static const struct stmmac_of_data meson6_dwmac_data = {
.setup = meson6_dwmac_setup,
.fix_mac_speed = meson6_dwmac_fix_mac_speed,
};
+
+static const struct of_device_id meson6_dwmac_match[] = {
+ { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
+
+static struct platform_driver meson6_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "meson6-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = meson6_dwmac_match,
+ },
+};
+module_platform_driver(meson6_dwmac_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6249a4ec08f0..30e28f0d9a60 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -22,13 +22,17 @@
#include <linux/phy.h>
#include <linux/of_net.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "stmmac_platform.h"
+
struct rk_priv_data {
struct platform_device *pdev;
int phy_iface;
@@ -428,10 +432,31 @@ static void rk_fix_speed(void *priv, unsigned int speed)
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
-const struct stmmac_of_data rk3288_gmac_data = {
+static const struct stmmac_of_data rk3288_gmac_data = {
.has_gmac = 1,
.fix_mac_speed = rk_fix_speed,
.setup = rk_gmac_setup,
.init = rk_gmac_init,
.exit = rk_gmac_exit,
};
+
+static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
+
+static struct platform_driver rk_gmac_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "rk_gmac-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = rk_gmac_dwmac_match,
+ },
+};
+module_platform_driver(rk_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip RK3288 DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 5a36bd2c7837..8141c5b844ae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -257,9 +257,28 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-const struct stmmac_of_data socfpga_gmac_data = {
+static const struct stmmac_of_data socfpga_gmac_data = {
.setup = socfpga_dwmac_probe,
.init = socfpga_dwmac_init,
.exit = socfpga_dwmac_exit,
.fix_mac_speed = socfpga_dwmac_fix_mac_speed,
};
+
+static const struct of_device_id socfpga_dwmac_match[] = {
+ { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
+
+static struct platform_driver socfpga_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "socfpga-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = socfpga_dwmac_match,
+ },
+};
+module_platform_driver(socfpga_dwmac_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index bb6e2dc61bec..a2e8111c5d14 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -17,6 +17,7 @@
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -351,16 +352,40 @@ static void *sti_dwmac_setup(struct platform_device *pdev)
return dwmac;
}
-const struct stmmac_of_data stih4xx_dwmac_data = {
+static const struct stmmac_of_data stih4xx_dwmac_data = {
.fix_mac_speed = stih4xx_fix_retime_src,
.setup = sti_dwmac_setup,
.init = stix4xx_init,
.exit = sti_dwmac_exit,
};
-const struct stmmac_of_data stid127_dwmac_data = {
+static const struct stmmac_of_data stid127_dwmac_data = {
.fix_mac_speed = stid127_fix_retime_src,
.setup = sti_dwmac_setup,
.init = stid127_init,
.exit = sti_dwmac_exit,
};
+
+static const struct of_device_id sti_dwmac_match[] = {
+ { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
+ { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
+ { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
+ { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sti_dwmac_match);
+
+static struct platform_driver sti_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sti-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sti_dwmac_match,
+ },
+};
+module_platform_driver(sti_dwmac_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index c5ea9ab75b03..15048ca39759 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -18,7 +18,9 @@
#include <linux/stmmac.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
@@ -132,7 +134,7 @@ static void sun7i_fix_speed(void *priv, unsigned int speed)
/* of_data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers. */
-const struct stmmac_of_data sun7i_gmac_data = {
+static const struct stmmac_of_data sun7i_gmac_data = {
.has_gmac = 1,
.tx_coe = 1,
.fix_mac_speed = sun7i_fix_speed,
@@ -140,3 +142,24 @@ const struct stmmac_of_data sun7i_gmac_data = {
.init = sun7i_gmac_init,
.exit = sun7i_gmac_exit,
};
+
+static const struct of_device_id sun7i_dwmac_match[] = {
+ { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
+
+static struct platform_driver sun7i_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sun7i-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun7i_dwmac_match,
+ },
+};
+module_platform_driver(sun7i_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sunxi DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 73bab983edd9..1f3b33a6c6a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,6 +34,14 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/reset.h>
+struct stmmac_resources {
+ void __iomem *addr;
+ const char *mac;
+ int wol_irq;
+ int lpi_irq;
+ int irq;
+};
+
struct stmmac_tx_info {
dma_addr_t buf;
bool map_as_page;
@@ -135,9 +143,9 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct net_device *ndev);
int stmmac_suspend(struct net_device *ndev);
int stmmac_dvr_remove(struct net_device *ndev);
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- void __iomem *addr);
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res);
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2c5ce2baca87..a5156739e1e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -52,6 +52,7 @@
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
+#include <linux/of_mdio.h>
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
@@ -816,18 +817,25 @@ static int stmmac_init_phy(struct net_device *dev)
priv->speed = 0;
priv->oldduplex = -1;
- if (priv->plat->phy_bus_name)
- snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
- priv->plat->phy_bus_name, priv->plat->bus_id);
- else
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ if (priv->plat->phy_node) {
+ phydev = of_phy_connect(dev, priv->plat->phy_node,
+ &stmmac_adjust_link, 0, interface);
+ } else {
+ if (priv->plat->phy_bus_name)
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+ priv->plat->phy_bus_name, priv->plat->bus_id);
+ else
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+ priv->plat->bus_id);
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
- priv->plat->phy_addr);
- pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->plat->phy_addr);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n",
+ phy_id_fmt);
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
+ interface);
+ }
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -848,7 +856,7 @@ static int stmmac_init_phy(struct net_device *dev)
* device as well.
* Note: phydev->phy_id is the result of reading the UID PHY registers.
*/
- if (phydev->phy_id == 0) {
+ if (!priv->plat->phy_node && phydev->phy_id == 0) {
phy_disconnect(phydev);
return -ENODEV;
}
@@ -975,13 +983,11 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
{
struct sk_buff *skb;
- skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
- flags);
+ skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
if (!skb) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
}
- skb_reserve(skb, NET_IP_ALIGN);
priv->rx_skbuff[i] = skb;
priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
priv->dma_buf_sz,
@@ -2800,16 +2806,15 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* stmmac_dvr_probe
* @device: device pointer
* @plat_dat: platform data pointer
- * @addr: iobase memory address
+ * @res: stmmac resource pointer
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
* Return:
- * on success the new private structure is returned, otherwise the error
- * pointer.
+ * returns 0 on success, otherwise errno.
*/
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- void __iomem *addr)
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
{
int ret = 0;
struct net_device *ndev = NULL;
@@ -2817,7 +2822,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
SET_NETDEV_DEV(ndev, device);
@@ -2828,8 +2833,17 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
priv->plat = plat_dat;
- priv->ioaddr = addr;
- priv->dev->base_addr = (unsigned long)addr;
+ priv->ioaddr = res->addr;
+ priv->dev->base_addr = (unsigned long)res->addr;
+
+ priv->dev->irq = res->irq;
+ priv->wol_irq = res->wol_irq;
+ priv->lpi_irq = res->lpi_irq;
+
+ if (res->mac)
+ memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
+
+ dev_set_drvdata(device, priv);
/* Verify driver arguments */
stmmac_verify_args();
@@ -2944,7 +2958,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
}
}
- return priv;
+ return 0;
error_mdio_register:
unregister_netdev(ndev);
@@ -2957,7 +2971,7 @@ error_pclk_get:
error_clk_get:
free_netdev(ndev);
- return ERR_PTR(ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
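The signature change above inverts the old contract: instead of returning a stmmac_priv for the caller to patch IRQs and the MAC address into afterwards, stmmac_dvr_probe() now consumes a caller-filled struct stmmac_resources and returns 0 or a negative errno. A hedged sketch of what a bus frontend now looks like (example_bus_probe is hypothetical; the PCI and platform conversions later in this patch are the real instances):

/* Assumes the driver-private "stmmac.h" shown above for struct
 * stmmac_resources and the stmmac_dvr_probe() prototype.
 */
static int example_bus_probe(struct device *dev,
			     struct plat_stmmacenet_data *plat,
			     void __iomem *base, int irq)
{
	struct stmmac_resources res;

	memset(&res, 0, sizeof(res));	/* unset fields stay 0/NULL */
	res.addr = base;
	res.irq = irq;
	res.wol_irq = irq;	/* fall back to the MAC IRQ, as the PCI path does */
	/* res.mac left NULL: MAC address comes from hardware or is randomized */

	return stmmac_dvr_probe(dev, plat, &res);	/* 0 or negative errno */
}
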
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 3bca908716e2..d71a721ea61c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -163,7 +163,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
- struct stmmac_priv *priv;
+ struct stmmac_resources res;
int i;
int ret;
@@ -214,19 +214,12 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
pci_enable_msi(pdev);
- priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
- if (IS_ERR(priv)) {
- dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__);
- return PTR_ERR(priv);
- }
- priv->dev->irq = pdev->irq;
- priv->wol_irq = pdev->irq;
-
- pci_set_drvdata(pdev, priv->dev);
+ memset(&res, 0, sizeof(res));
+ res.addr = pcim_iomap_table(pdev)[i];
+ res.wol_irq = pdev->irq;
+ res.irq = pdev->irq;
- dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n");
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 68aec5c460db..f3918c7e7eeb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -28,29 +28,11 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include "stmmac.h"
#include "stmmac_platform.h"
-static const struct of_device_id stmmac_dt_ids[] = {
- /* SoC specific glue layers should come before generic bindings */
- { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
- { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
- { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
- { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
- { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
- { .compatible = "st,spear600-gmac"},
- { .compatible = "snps,dwmac-3.610"},
- { .compatible = "snps,dwmac-3.70a"},
- { .compatible = "snps,dwmac-3.710"},
- { .compatible = "snps,dwmac"},
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
-
#ifdef CONFIG_OF
/**
@@ -129,14 +111,9 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct stmmac_dma_cfg *dma_cfg;
const struct of_device_id *device;
+ struct device *dev = &pdev->dev;
- if (!np)
- return -ENODEV;
-
- device = of_match_device(stmmac_dt_ids, &pdev->dev);
- if (!device)
- return -ENODEV;
-
+ device = of_match_device(dev->driver->of_match_table, dev);
if (device->data) {
const struct stmmac_of_data *data = device->data;
plat->has_gmac = data->has_gmac;
@@ -168,13 +145,24 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
/* Default to phy auto-detection */
plat->phy_addr = -1;
+ /* If we find a phy-handle property, use it as the PHY */
+ plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* If phy-handle is not specified, check if we have a fixed-phy */
+ if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+ if (of_phy_register_fixed_link(np) < 0)
+ return -ENODEV;
+
+ plat->phy_node = of_node_get(np);
+ }
+
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
*/
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- if (plat->phy_bus_name)
+ if (plat->phy_node || plat->phy_bus_name)
plat->mdio_bus_data = NULL;
else
plat->mdio_bus_data =
@@ -232,8 +220,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
if (of_find_property(np, "snps,pbl", NULL)) {
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
- if (!dma_cfg)
+ if (!dma_cfg) {
+ of_node_put(np);
return -ENOMEM;
+ }
plat->dma_cfg = dma_cfg;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
dma_cfg->fixed_burst =
@@ -268,27 +258,26 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
* the necessary platform resources, invoke custom helper (if required) and
* invoke the main probe function.
*/
-static int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_pltfr_probe(struct platform_device *pdev)
{
+ struct stmmac_resources stmmac_res;
int ret = 0;
struct resource *res;
struct device *dev = &pdev->dev;
- void __iomem *addr = NULL;
- struct stmmac_priv *priv = NULL;
struct plat_stmmacenet_data *plat_dat = NULL;
- const char *mac = NULL;
- int irq, wol_irq, lpi_irq;
+
+ memset(&stmmac_res, 0, sizeof(stmmac_res));
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
*/
- irq = platform_get_irq_byname(pdev, "macirq");
- if (irq < 0) {
- if (irq != -EPROBE_DEFER) {
+ stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
+ if (stmmac_res.irq < 0) {
+ if (stmmac_res.irq != -EPROBE_DEFER) {
dev_err(dev,
"MAC IRQ configuration information not found\n");
}
- return irq;
+ return stmmac_res.irq;
}
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -298,21 +287,21 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
* In case the wake up interrupt is not passed from the platform
* so the driver will continue to use the mac irq (ndev->irq)
*/
- wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (wol_irq < 0) {
- if (wol_irq == -EPROBE_DEFER)
+ stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (stmmac_res.wol_irq < 0) {
+ if (stmmac_res.wol_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
- wol_irq = irq;
+ stmmac_res.wol_irq = stmmac_res.irq;
}
- lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
- if (lpi_irq == -EPROBE_DEFER)
+ stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (stmmac_res.lpi_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
+ stmmac_res.addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(stmmac_res.addr))
+ return PTR_ERR(stmmac_res.addr);
plat_dat = dev_get_platdata(&pdev->dev);
@@ -332,7 +321,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
plat_dat->unicast_filter_entries = 1;
if (pdev->dev.of_node) {
- ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
+ ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
if (ret) {
pr_err("%s: main dt probe failed", __func__);
return ret;
@@ -353,27 +342,9 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
return ret;
}
- priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
- if (IS_ERR(priv)) {
- pr_err("%s: main driver probe failed", __func__);
- return PTR_ERR(priv);
- }
-
- /* Copy IRQ values to priv structure which is now avaialble */
- priv->dev->irq = irq;
- priv->wol_irq = wol_irq;
- priv->lpi_irq = lpi_irq;
-
- /* Get MAC address if available (DT) */
- if (mac)
- memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
-
- platform_set_drvdata(pdev, priv->dev);
-
- pr_debug("STMMAC platform driver registration completed");
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
/**
* stmmac_pltfr_remove
@@ -381,7 +352,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
* Description: this function calls the main to free the net resources
* and calls the platforms hook and release the resources (e.g. mem).
*/
-static int stmmac_pltfr_remove(struct platform_device *pdev)
+int stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -395,6 +366,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
return ret;
}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
#ifdef CONFIG_PM_SLEEP
/**
@@ -438,21 +410,6 @@ static int stmmac_pltfr_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
- stmmac_pltfr_suspend, stmmac_pltfr_resume);
-
-static struct platform_driver stmmac_pltfr_driver = {
- .probe = stmmac_pltfr_probe,
- .remove = stmmac_pltfr_remove,
- .driver = {
- .name = STMMAC_RESOURCE_NAME,
- .pm = &stmmac_pltfr_pm_ops,
- .of_match_table = of_match_ptr(stmmac_dt_ids),
- },
-};
-
-module_platform_driver(stmmac_pltfr_driver);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
-MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
-MODULE_LICENSE("GPL");
+SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
+ stmmac_pltfr_resume);
+EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 093eb99e5ffd..71da86d7bd00 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -19,11 +19,8 @@
#ifndef __STMMAC_PLATFORM_H__
#define __STMMAC_PLATFORM_H__
-extern const struct stmmac_of_data meson6_dwmac_data;
-extern const struct stmmac_of_data sun7i_gmac_data;
-extern const struct stmmac_of_data stih4xx_dwmac_data;
-extern const struct stmmac_of_data stid127_dwmac_data;
-extern const struct stmmac_of_data socfpga_gmac_data;
-extern const struct stmmac_of_data rk3288_gmac_data;
+int stmmac_pltfr_probe(struct platform_device *pdev);
+int stmmac_pltfr_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b536b4c82752..462820514fae 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1361,7 +1361,6 @@ static int cpsw_ndo_stop(struct net_device *ndev)
if (cpsw_common_res_usage_state(priv) <= 1) {
cpts_unregister(priv->cpts);
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_ctlr_stop(priv->dma);
cpsw_ale_stop(priv->ale);
}
@@ -1456,7 +1455,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->cpts->rx_enable)
ctrl |= CTRL_V2_RX_TS_BITS;
- break;
+ break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
@@ -1466,7 +1465,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->cpts->rx_enable)
ctrl |= CTRL_V3_RX_TS_BITS;
- break;
+ break;
}
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1589,10 +1588,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
ndev->stats.tx_errors++;
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_chan_stop(priv->txch);
cpdma_chan_start(priv->txch);
- cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
}
@@ -1629,10 +1626,8 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpsw_rx_interrupt(priv->irqs_table[0], priv);
cpsw_tx_interrupt(priv->irqs_table[1], priv);
- cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
}
#endif
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 6e927b4583aa..43b061bd8e07 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -268,39 +268,6 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
}
EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
-static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
- int port_mask)
-{
- int port;
-
- port = cpsw_ale_get_port_num(ale_entry);
- if ((BIT(port) & port_mask) == 0)
- return; /* ports dont intersect, not interested */
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
-}
-
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
-{
- u32 ale_entry[ALE_ENTRY_WORDS];
- int ret, idx;
-
- for (idx = 0; idx < ale->params.ale_entries; idx++) {
- cpsw_ale_read(ale, idx, ale_entry);
- ret = cpsw_ale_get_entry_type(ale_entry);
- if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
- continue;
-
- if (cpsw_ale_get_mcast(ale_entry))
- cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
- else
- cpsw_ale_flush_ucast(ale, ale_entry, port_mask);
-
- cpsw_ale_write(ale, idx, ale_entry);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_flush);
-
static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
int flags, u16 vid)
{
@@ -752,18 +719,6 @@ static void cpsw_ale_timer(unsigned long arg)
}
}
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
-{
- del_timer_sync(&ale->timer);
- ale->ageout = ageout * HZ;
- if (ale->ageout) {
- ale->timer.expires = jiffies + ale->ageout;
- add_timer(&ale->timer);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
-
void cpsw_ale_start(struct cpsw_ale *ale)
{
u32 rev;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index af1e7ecd87c6..a7001894f3da 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -90,8 +90,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale);
void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 43efc3a0cda5..5ec4ed3f6c8d 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -537,7 +537,7 @@ int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
static void netcp_frag_free(bool is_frag, void *ptr)
{
if (is_frag)
- put_page(virt_to_head_page(ptr));
+ skb_free_frag(ptr);
else
kfree(ptr);
}
@@ -698,7 +698,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
}
}
- netcp->ndev->last_rx = jiffies;
netcp->ndev->stats.rx_packets++;
netcp->ndev->stats.rx_bytes += skb->len;
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 3d8f60d9643e..6f0a4495c7f3 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -721,9 +721,6 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
if (!hash_default)
__inv_buffer(buf, len);
- /* ISSUE: Is this needed? */
- dev->last_rx = jiffies;
-
#ifdef TILE_NET_DUMP_PACKETS
dump_packet(buf, len, "rx");
#endif /* TILE_NET_DUMP_PACKETS */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 8e9371a3388a..3c54a2cae5df 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -604,8 +604,7 @@ spider_net_set_multi(struct net_device *netdev)
int i;
u32 reg;
struct spider_net_card *card = netdev_priv(netdev);
- unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
- {0, };
+ DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
spider_net_set_promisc(card);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index de2850497c09..725106f75d42 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -472,8 +472,7 @@ struct rhine_private {
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
- struct rx_desc *rx_head_desc;
- unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_rx;
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
struct rhine_stats rx_stats;
@@ -1213,17 +1212,61 @@ static void free_ring(struct net_device* dev)
}
-static void alloc_rbufs(struct net_device *dev)
+struct rhine_skb_dma {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+};
+
+static inline int rhine_skb_dma_init(struct net_device *dev,
+ struct rhine_skb_dma *sd)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
- dma_addr_t next;
+ const int size = rp->rx_buf_sz;
+
+ sd->skb = netdev_alloc_skb(dev, size);
+ if (!sd->skb)
+ return -ENOMEM;
+
+ sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
+ netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
+ dev_kfree_skb_any(sd->skb);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void rhine_reset_rbufs(struct rhine_private *rp)
+{
int i;
- rp->dirty_rx = rp->cur_rx = 0;
+ rp->cur_rx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++)
+ rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+}
+
+static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
+ struct rhine_skb_dma *sd, int entry)
+{
+ rp->rx_skbuff_dma[entry] = sd->dma;
+ rp->rx_skbuff[entry] = sd->skb;
+
+ rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
+ dma_wmb();
+}
+
+static void free_rbufs(struct net_device* dev);
+
+static int alloc_rbufs(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ dma_addr_t next;
+ int rc, i;
rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
- rp->rx_head_desc = &rp->rx_ring[0];
next = rp->rx_ring_dma;
/* Init the ring entries */
@@ -1239,23 +1282,20 @@ static void alloc_rbufs(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
- rp->rx_skbuff[i] = skb;
- if (skb == NULL)
- break;
+ struct rhine_skb_dma sd;
- rp->rx_skbuff_dma[i] =
- dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
- rp->rx_skbuff_dma[i] = 0;
- dev_kfree_skb(skb);
- break;
+ rc = rhine_skb_dma_init(dev, &sd);
+ if (rc < 0) {
+ free_rbufs(dev);
+ goto out;
}
- rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
- rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+
+ rhine_skb_dma_nic_store(rp, &sd, i);
}
- rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ rhine_reset_rbufs(rp);
+out:
+ return rc;
}
static void free_rbufs(struct net_device* dev)
@@ -1659,16 +1699,18 @@ static int rhine_open(struct net_device *dev)
rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
if (rc)
- return rc;
+ goto out;
netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
rc = alloc_ring(dev);
- if (rc) {
- free_irq(rp->irq, dev);
- return rc;
- }
- alloc_rbufs(dev);
+ if (rc < 0)
+ goto out_free_irq;
+
+ rc = alloc_rbufs(dev);
+ if (rc < 0)
+ goto out_free_ring;
+
alloc_tbufs(dev);
rhine_chip_reset(dev);
rhine_task_enable(rp);
@@ -1680,7 +1722,14 @@ static int rhine_open(struct net_device *dev)
netif_start_queue(dev);
- return 0;
+out:
+ return rc;
+
+out_free_ring:
+ free_ring(dev);
+out_free_irq:
+ free_irq(rp->irq, dev);
+ goto out;
}
static void rhine_reset_task(struct work_struct *work)
@@ -1700,9 +1749,9 @@ static void rhine_reset_task(struct work_struct *work)
/* clear all descriptors */
free_tbufs(dev);
- free_rbufs(dev);
alloc_tbufs(dev);
- alloc_rbufs(dev);
+
+ rhine_reset_rbufs(rp);
/* Reinitialize the hardware. */
rhine_chip_reset(dev);
@@ -1730,6 +1779,11 @@ static void rhine_tx_timeout(struct net_device *dev)
schedule_work(&rp->reset_task);
}
+static inline bool rhine_tx_queue_full(struct rhine_private *rp)
+{
+ return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
+}
+
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1800,11 +1854,17 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
netdev_sent_queue(dev, skb->len);
/* lock eth irq */
- wmb();
+ dma_wmb();
rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
wmb();
rp->cur_tx++;
+ /*
+ * Do not let the cur_tx write languish unseen once the NIC has been
+ * told of the transmit request: the transmit completion handler could
+ * otherwise miss it. Pairs with the smp_rmb() in rhine_tx().
+ */
+ smp_wmb();
/* Non-x86 Todo: explicitly flush cache lines here. */
@@ -1817,8 +1877,14 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
ioaddr + ChipCmd1);
IOSYNC;
- if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
+ /* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
+ if (rhine_tx_queue_full(rp)) {
netif_stop_queue(dev);
+ smp_rmb();
+ /* Rejuvenate. */
+ if (!rhine_tx_queue_full(rp))
+ netif_wake_queue(dev);
+ }
netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
rp->cur_tx - 1, entry);
@@ -1866,13 +1932,24 @@ static void rhine_tx(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
- int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int dirty_tx = rp->dirty_tx;
+ unsigned int cur_tx;
struct sk_buff *skb;
+ /*
+ * The race with rhine_start_tx does not matter here as long as the
+ * driver enforces a value of cur_tx that was relevant when the
+ * packet was scheduled to the network chipset.
+ * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
+ */
+ smp_rmb();
+ cur_tx = rp->cur_tx;
/* find and cleanup dirty tx descriptors */
- while (rp->dirty_tx != rp->cur_tx) {
- txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+ while (dirty_tx != cur_tx) {
+ unsigned int entry = dirty_tx % TX_RING_SIZE;
+ u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+
netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
entry, txstatus);
if (txstatus & DescOwn)
@@ -1921,12 +1998,23 @@ static void rhine_tx(struct net_device *dev)
pkts_compl++;
dev_consume_skb_any(skb);
rp->tx_skbuff[entry] = NULL;
- entry = (++rp->dirty_tx) % TX_RING_SIZE;
+ dirty_tx++;
}
+ rp->dirty_tx = dirty_tx;
+ /* Pity we can't rely on the nearby BQL completion implicit barrier. */
+ smp_wmb();
+
netdev_completed_queue(dev, pkts_compl, bytes_compl);
- if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
+
+ /* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
+ if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
netif_wake_queue(dev);
+ smp_rmb();
+ /* Rejuvenate. */
+ if (rhine_tx_queue_full(rp))
+ netif_stop_queue(dev);
+ }
}
/**
@@ -1944,22 +2032,33 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
return be16_to_cpup((__be16 *)trailer);
}
+static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
+ int data_size)
+{
+ dma_rmb();
+ if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
+ u16 vlan_tci;
+
+ vlan_tci = rhine_get_vlan_tci(skb, data_size);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ }
+}
+
/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
- int count;
int entry = rp->cur_rx % RX_RING_SIZE;
+ int count;
netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
- entry, le32_to_cpu(rp->rx_head_desc->rx_status));
+ entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
/* If EOP is set on the next entry, it's a new packet. Send it up. */
for (count = 0; count < limit; ++count) {
- struct rx_desc *desc = rp->rx_head_desc;
+ struct rx_desc *desc = rp->rx_ring + entry;
u32 desc_status = le32_to_cpu(desc->rx_status);
- u32 desc_length = le32_to_cpu(desc->desc_length);
int data_size = desc_status >> 16;
if (desc_status & DescOwn)
@@ -1975,10 +2074,6 @@ static int rhine_rx(struct net_device *dev, int limit)
"entry %#x length %d status %08x!\n",
entry, data_size,
desc_status);
- netdev_warn(dev,
- "Oversized Ethernet frame %p vs %p\n",
- rp->rx_head_desc,
- &rp->rx_ring[entry]);
dev->stats.rx_length_errors++;
} else if (desc_status & RxErr) {
/* There was a error. */
@@ -2000,16 +2095,17 @@ static int rhine_rx(struct net_device *dev, int limit)
}
}
} else {
- struct sk_buff *skb = NULL;
/* Length should omit the CRC */
int pkt_len = data_size - 4;
- u16 vlan_tci = 0;
+ struct sk_buff *skb;
/* Check if the packet is long enough to accept without
copying to a minimally-sized skbuff. */
- if (pkt_len < rx_copybreak)
+ if (pkt_len < rx_copybreak) {
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
- if (skb) {
+ if (unlikely(!skb))
+ goto drop;
+
dma_sync_single_for_cpu(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
@@ -2018,32 +2114,31 @@ static int rhine_rx(struct net_device *dev, int limit)
skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
pkt_len);
- skb_put(skb, pkt_len);
+
dma_sync_single_for_device(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
DMA_FROM_DEVICE);
} else {
+ struct rhine_skb_dma sd;
+
+ if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
+ goto drop;
+
skb = rp->rx_skbuff[entry];
- if (skb == NULL) {
- netdev_err(dev, "Inconsistent Rx descriptor chain\n");
- break;
- }
- rp->rx_skbuff[entry] = NULL;
- skb_put(skb, pkt_len);
+
dma_unmap_single(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
DMA_FROM_DEVICE);
+ rhine_skb_dma_nic_store(rp, &sd, entry);
}
- if (unlikely(desc_length & DescTag))
- vlan_tci = rhine_get_vlan_tci(skb, data_size);
-
+ skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
- if (unlikely(desc_length & DescTag))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rhine_rx_vlan_tag(skb, desc, data_size);
+
netif_receive_skb(skb);
u64_stats_update_begin(&rp->rx_stats.syncp);
@@ -2051,35 +2146,16 @@ static int rhine_rx(struct net_device *dev, int limit)
rp->rx_stats.packets++;
u64_stats_update_end(&rp->rx_stats.syncp);
}
+give_descriptor_to_nic:
+ desc->rx_status = cpu_to_le32(DescOwn);
entry = (++rp->cur_rx) % RX_RING_SIZE;
- rp->rx_head_desc = &rp->rx_ring[entry];
- }
-
- /* Refill the Rx ring buffers. */
- for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
- struct sk_buff *skb;
- entry = rp->dirty_rx % RX_RING_SIZE;
- if (rp->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
- rp->rx_skbuff[entry] = skb;
- if (skb == NULL)
- break; /* Better luck next round. */
- rp->rx_skbuff_dma[entry] =
- dma_map_single(hwdev, skb->data,
- rp->rx_buf_sz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(hwdev,
- rp->rx_skbuff_dma[entry])) {
- dev_kfree_skb(skb);
- rp->rx_skbuff_dma[entry] = 0;
- break;
- }
- rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
- }
- rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
}
return count;
+
+drop:
+ dev->stats.rx_dropped++;
+ goto give_descriptor_to_nic;
}
static void rhine_restart_tx(struct net_device *dev) {
@@ -2484,9 +2560,8 @@ static int rhine_resume(struct device *device)
enable_mmio(rp->pioaddr, rp->quirks);
rhine_power_init(dev);
free_tbufs(dev);
- free_rbufs(dev);
alloc_tbufs(dev);
- alloc_rbufs(dev);
+ rhine_reset_rbufs(rp);
rhine_task_enable(rp);
spin_lock_bh(&rp->lock);
init_registers(dev);
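The via-rhine changes above replace the dirty_rx/cur_rx refill loop and, on the Tx side, introduce a paired smp_wmb()/smp_rmb() protocol: each side publishes its ring index, then re-checks queue fullness after a read barrier so a racing stop and wake cannot leave the queue stranded. Below is a single-threaded, compilable model of that re-check ("rejuvenate") pattern, with the barriers reduced to comments and all names illustrative:

#include <stdbool.h>
#include <stdio.h>

#define TX_QUEUE_LEN 64

static unsigned int cur_tx, dirty_tx;
static bool stopped;

static bool queue_full(void)
{
	return cur_tx - dirty_tx >= TX_QUEUE_LEN;
}

static void start_tx(void)	/* producer, cf. rhine_start_tx() */
{
	cur_tx++;		/* real driver: smp_wmb() publishes cur_tx */
	if (queue_full()) {
		stopped = true;
		/* real driver: smp_rmb(), then re-check against a dirty_tx
		 * the consumer may have just published */
		if (!queue_full())
			stopped = false;
	}
}

static void tx_complete(unsigned int done)	/* consumer, cf. rhine_tx() */
{
	/* real driver: smp_rmb() sees the cur_tx the producer published */
	dirty_tx += done;	/* real driver: smp_wmb() before waking */
	if (!queue_full() && stopped) {
		stopped = false;
		/* re-check against a cur_tx the producer may have advanced */
		if (queue_full())
			stopped = true;
	}
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < TX_QUEUE_LEN; i++)
		start_tx();
	printf("stopped=%d\n", stopped);	/* ring filled -> 1 */
	tx_complete(1);
	printf("stopped=%d\n", stopped);	/* one slot reaped -> 0 */
	return 0;
}
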
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index af2694dc6f90..5a1068df7038 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -62,12 +62,12 @@
u32 temac_ior(struct temac_local *lp, int offset)
{
- return in_be32((u32 *)(lp->regs + offset));
+ return in_be32(lp->regs + offset);
}
void temac_iow(struct temac_local *lp, int offset, u32 value)
{
- out_be32((u32 *) (lp->regs + offset), value);
+ out_be32(lp->regs + offset, value);
}
int temac_indirect_busywait(struct temac_local *lp)
@@ -124,7 +124,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
*/
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
- return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
+ return in_be32(lp->sdma_regs + (reg << 2));
}
/**
@@ -134,7 +134,7 @@ static u32 temac_dma_in32(struct temac_local *lp, int reg)
*/
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
- out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+ out_be32(lp->sdma_regs + (reg << 2), value);
}
/* DMA register access functions can be DCR based or memory mapped.
@@ -400,7 +400,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
mutex_unlock(&lp->indirect_mutex);
}
-struct temac_option {
+static struct temac_option {
int flg;
u32 opt;
u32 reg;
@@ -587,7 +587,7 @@ static void temac_device_reset(struct net_device *ndev)
ndev->trans_start = jiffies; /* prevent tx timeout */
}
-void temac_adjust_link(struct net_device *ndev)
+static void temac_adjust_link(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct phy_device *phy = lp->phy_dev;
@@ -688,10 +688,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag)) {
- if (!netif_queue_stopped(ndev)) {
+ if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
- }
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 4c9b4fa1d3c1..7cb9abac95c8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -11,16 +11,16 @@
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
-#define XAE_HDR_VLAN_SIZE 18 /* Size of an Ethernet hdr + VLAN */
#define XAE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
#define XAE_MTU 1500 /* Max MTU of an Ethernet frame */
#define XAE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
#define XAE_MAX_FRAME_SIZE (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
-#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE)
+#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE)
#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
/* Configuration options */
@@ -38,18 +38,21 @@
#define XAE_OPTION_FLOW_CONTROL (1 << 4)
/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
- * stripped. Default: disabled (set) */
+ * stripped. Default: disabled (set)
+ */
#define XAE_OPTION_FCS_STRIP (1 << 5)
/* Generate FCS field and add PAD automatically for outgoing frames.
- * Default: enabled (set) */
+ * Default: enabled (set)
+ */
#define XAE_OPTION_FCS_INSERT (1 << 6)
/* Enable Length/Type error checking for incoming frames. When this option is
* set, the MAC will filter frames that have a mismatched type/length field
* and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these
* types of frames are encountered. When this option is cleared, the MAC will
- * allow these types of frames to be received. Default: enabled (set) */
+ * allow these types of frames to be received. Default: enabled (set)
+ */
#define XAE_OPTION_LENTYPE_ERR (1 << 7)
/* Enable the transmitter. Default: enabled (set) */
@@ -159,12 +162,12 @@
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
-#define XAE_MDIO_MIP_OFFSET 0x00000620 /* MII Mgmt Interrupt Pending
- * register offset */
-#define XAE_MDIO_MIE_OFFSET 0x00000640 /* MII Management Interrupt Enable
- * register offset */
-#define XAE_MDIO_MIC_OFFSET 0x00000660 /* MII Management Interrupt Clear
- * register offset. */
+/* MII Mgmt Interrupt Pending register offset */
+#define XAE_MDIO_MIP_OFFSET 0x00000620
+/* MII Management Interrupt Enable register offset */
+#define XAE_MDIO_MIE_OFFSET 0x00000640
+/* MII Management Interrupt Clear register offset. */
+#define XAE_MDIO_MIC_OFFSET 0x00000660
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
@@ -176,18 +179,17 @@
#define XAE_MCAST_TABLE_OFFSET 0x00020000 /* Multicast table address */
/* Bit Masks for Axi Ethernet RAF register */
-#define XAE_RAF_MCSTREJ_MASK 0x00000002 /* Reject receive multicast
- * destination address */
-#define XAE_RAF_BCSTREJ_MASK 0x00000004 /* Reject receive broadcast
- * destination address */
+/* Reject receive multicast destination address */
+#define XAE_RAF_MCSTREJ_MASK 0x00000002
+/* Reject receive broadcast destination address */
+#define XAE_RAF_BCSTREJ_MASK 0x00000004
#define XAE_RAF_TXVTAGMODE_MASK 0x00000018 /* Tx VLAN TAG mode */
#define XAE_RAF_RXVTAGMODE_MASK 0x00000060 /* Rx VLAN TAG mode */
#define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */
#define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */
#define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */
-#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000 /* Exteneded Multicast
- * Filtering mode
- */
+/* Extended Multicast Filtering mode */
+#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000
#define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. Counter Reset */
#define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */
#define XAE_RAF_TXVTAGMODE_SHIFT 3 /* Tx Tag mode shift bits */
@@ -197,15 +199,16 @@
/* Bit Masks for Axi Ethernet TPF and IFGP registers */
#define XAE_TPF_TPFV_MASK 0x0000FFFF /* Tx pause frame value */
-#define XAE_IFGP0_IFGP_MASK 0x0000007F /* Transmit inter-frame
- * gap adjustment value */
+/* Transmit inter-frame gap adjustment value */
+#define XAE_IFGP0_IFGP_MASK 0x0000007F
/* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply
- * for all 3 registers. */
-#define XAE_INT_HARDACSCMPLT_MASK 0x00000001 /* Hard register access
- * complete */
-#define XAE_INT_AUTONEG_MASK 0x00000002 /* Auto negotiation
- * complete */
+ * for all 3 registers.
+ */
+/* Hard register access complete */
+#define XAE_INT_HARDACSCMPLT_MASK 0x00000001
+/* Auto negotiation complete */
+#define XAE_INT_AUTONEG_MASK 0x00000002
#define XAE_INT_RXCMPIT_MASK 0x00000004 /* Rx complete */
#define XAE_INT_RXRJECT_MASK 0x00000008 /* Rx frame rejected */
#define XAE_INT_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */
@@ -215,10 +218,9 @@
#define XAE_INT_PHYRSTCMPLT_MASK 0x00000100 /* Phy Reset complete */
#define XAE_INT_ALL_MASK 0x0000003F /* All the ints */
+/* INT bits that indicate receive errors */
#define XAE_INT_RECV_ERROR_MASK \
- (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that
- * indicate receive
- * errors */
+ (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK)
/* Bit masks for Axi Ethernet VLAN TPID Word 0 register */
#define XAE_TPID_0_MASK 0x0000FFFF /* TPID 0 */
@@ -231,27 +233,28 @@
/* Bit masks for Axi Ethernet RCW1 register */
#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */
#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */
-#define XAE_RCW1_FCS_MASK 0x20000000 /* In-Band FCS enable
- * (FCS not stripped) */
+/* In-Band FCS enable (FCS not stripped) */
+#define XAE_RCW1_FCS_MASK 0x20000000
#define XAE_RCW1_RX_MASK 0x10000000 /* Receiver enable */
#define XAE_RCW1_VLAN_MASK 0x08000000 /* VLAN frame enable */
-#define XAE_RCW1_LT_DIS_MASK 0x02000000 /* Length/type field valid check
- * disable */
-#define XAE_RCW1_CL_DIS_MASK 0x01000000 /* Control frame Length check
- * disable */
-#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address
- * bits [47:32]. Bits [31:0] are
- * stored in register RCW0 */
+/* Length/type field valid check disable */
+#define XAE_RCW1_LT_DIS_MASK 0x02000000
+/* Control frame Length check disable */
+#define XAE_RCW1_CL_DIS_MASK 0x01000000
+/* Pause frame source address bits [47:32]. Bits [31:0] are
+ * stored in register RCW0
+ */
+#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet TC register */
#define XAE_TC_RST_MASK 0x80000000 /* Reset */
#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */
-#define XAE_TC_FCS_MASK 0x20000000 /* In-Band FCS enable
- * (FCS not generated) */
+/* In-Band FCS enable (FCS not generated) */
+#define XAE_TC_FCS_MASK 0x20000000
#define XAE_TC_TX_MASK 0x10000000 /* Transmitter enable */
#define XAE_TC_VLAN_MASK 0x08000000 /* VLAN frame enable */
-#define XAE_TC_IFG_MASK 0x02000000 /* Inter-frame gap adjustment
- * enable */
+/* Inter-frame gap adjustment enable */
+#define XAE_TC_IFG_MASK 0x02000000
/* Bit masks for Axi Ethernet FCC register */
#define XAE_FCC_FCRX_MASK 0x20000000 /* Rx flow control enable */
@@ -301,10 +304,10 @@
#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */
/* Bit masks for Axi Ethernet UAW1 register */
-#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF /* Station address bits
- * [47:32]; Station address
- * bits [31:0] are stored in
- * register UAW0 */
+/* Station address bits [47:32]; Station address
+ * bits [31:0] are stored in register UAW0
+ */
+#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet FMI register */
#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
@@ -320,8 +323,8 @@
#define XAE_PHY_TYPE_SGMII 4
#define XAE_PHY_TYPE_1000BASE_X 5
-#define XAE_MULTICAST_CAM_TABLE_NUM 4 /* Total number of entries in the
- * hardware multicast table. */
+ /* Total number of entries in the hardware multicast table. */
+#define XAE_MULTICAST_CAM_TABLE_NUM 4
/* Axi Ethernet Synthesis features */
#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
@@ -407,8 +410,11 @@ struct axidma_bd {
* Txed/Rxed in the existing hardware. If jumbo option is
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
- * @jumbo_support: Stores hardware configuration for jumbo support. If hardware
- * can handle jumbo packets, this entry will be 1, else 0.
+ * @rxmem: Stores rx memory size for jumbo frame handling.
+ * @csum_offload_on_tx_path: Stores the checksum selection on TX side.
+ * @csum_offload_on_rx_path: Stores the checksum selection on RX side.
+ * @coalesce_count_rx: Stores the irq coalesce count on the RX side.
+ * @coalesce_count_tx: Stores the irq coalesce count on the TX side.
*/
struct axienet_local {
struct net_device *ndev;
@@ -446,7 +452,7 @@ struct axienet_local {
u32 rx_bd_ci;
u32 max_frm_size;
- u32 jumbo_support;
+ u32 rxmem;
int csum_offload_on_tx_path;
int csum_offload_on_rx_path;
@@ -472,7 +478,7 @@ struct axienet_option {
* @lp: Pointer to axienet local structure
* @offset: Address offset from the base address of Axi Ethernet core
*
- * returns: The contents of the Axi Ethernet register
+ * Return: The contents of the Axi Ethernet register
*
* This function returns the contents of the corresponding register.
*/
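Several of the masks above (XAE_RCW1_PAUSEADDR_MASK, XAE_UAW1_UNICASTADDR_MASK) encode the same convention: a 48-bit MAC address is split across two 32-bit registers, with address bits [31:0] in the word-0 register and bits [47:32] in the low half of the word-1 register. A small standalone sketch of that packing (the helper name is illustrative; the byte order assumes the driver's little-endian-first layout):

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte MAC into the UAW0/UAW1 split described above. */
static void mac_to_uaw(const uint8_t mac[6], uint32_t *uaw0, uint32_t *uaw1)
{
	*uaw0 = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
		((uint32_t)mac[3] << 24);   /* address bits [31:0] */
	*uaw1 = mac[4] | (mac[5] << 8);     /* address bits [47:32] */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0a, 0x35, 0x01, 0x02, 0x03 };
	uint32_t uaw0, uaw1;

	mac_to_uaw(mac, &uaw0, &uaw1);
	printf("UAW0=0x%08x UAW1=0x%08x\n", uaw0, uaw1);
	return 0;
}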
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 28b7e7d9c272..4208dd7ef101 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -117,7 +117,7 @@ static struct axienet_option axienet_options[] = {
* @lp: Pointer to axienet local structure
* @reg: Address offset from the base address of the Axi DMA core
*
- * returns: The contents of the Axi DMA register
+ * Return: The contents of the Axi DMA register
*
* This function returns the contents of the corresponding Axi DMA register.
*/
@@ -179,8 +179,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
* axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
* @ndev: Pointer to the net_device structure
*
- * returns: 0, on success
- * -ENOMEM, on failure
+ * Return: 0 on success, -ENOMEM on failure
*
* This function is called to initialize the Rx and Tx DMA descriptor
* rings. This initializes the descriptors with required default values
@@ -198,9 +197,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- /*
- * Allocate the Tx and Rx buffer descriptors.
- */
+ /* Allocate the Tx and Rx buffer descriptors. */
lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL);
@@ -263,7 +260,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.*/
+ * halted state. This will make the Rx side ready for reception.
+ */
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -273,7 +271,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting */
+ * tail pointer register that the Tx channel will start transmitting.
+ */
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -320,7 +319,7 @@ static void axienet_set_mac_address(struct net_device *ndev, void *address)
* @ndev: Pointer to the net_device structure
* @p: 6 byte Address to be written as MAC address
*
- * returns: 0 for all conditions. Presently, there is no failure case.
+ * Return: 0 for all conditions. Presently, there is no failure case.
*
* This function is called to initialize the MAC address of the Axi Ethernet
* core. It calls the core specific axienet_set_mac_address. This is the
@@ -354,7 +353,8 @@ static void axienet_set_multicast_list(struct net_device *ndev)
netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
/* We must make the kernel realize we had to move into
* promiscuous mode. If it was a promiscuous mode request
- * the flag is already set. If not we set it. */
+ * the flag is already set. If not we set it.
+ */
ndev->flags |= IFF_PROMISC;
reg = axienet_ior(lp, XAE_FMI_OFFSET);
reg |= XAE_FMI_PM_MASK;
@@ -438,14 +438,15 @@ static void __axienet_device_reset(struct axienet_local *lp,
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
- * reset process. */
+ * reset process.
+ */
axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
- dev_err(dev, "axienet_device_reset DMA "
- "reset timeout!\n");
+ netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+ __func__);
break;
}
}
@@ -471,19 +472,21 @@ static void axienet_device_reset(struct net_device *ndev)
__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
+ lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU) &&
- (lp->jumbo_support)) {
- lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
- XAE_TRL_SIZE;
- lp->options |= XAE_OPTION_JUMBO;
+ (ndev->mtu <= XAE_JUMBO_MTU)) {
+ lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
+ XAE_TRL_SIZE;
+
+ if (lp->max_frm_size <= lp->rxmem)
+ lp->options |= XAE_OPTION_JUMBO;
}
if (axienet_dma_bd_init(ndev)) {
- dev_err(&ndev->dev, "axienet_device_reset descriptor "
- "allocation failed\n");
+ netdev_err(ndev, "%s: descriptor allocation failed\n",
+ __func__);
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
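The reworked sizing logic above replaces the old jumbo_support flag with an explicit capacity check: the required frame size is the MTU plus the VLAN Ethernet header and the FCS trailer, and jumbo is only enabled when that fits in the Rx memory reported by the device tree (xlnx,rxmem). The same arithmetic backs the axienet_change_mtu() check further down. A standalone sketch of the decision, using the constants from the header above (VLAN_ETH_HLEN is 18):

#include <stdint.h>
#include <stdio.h>

#define XAE_MTU        1500
#define XAE_JUMBO_MTU  9000
#define XAE_TRL_SIZE   4     /* FCS trailer */
#define VLAN_ETH_HLEN  18    /* Ethernet header + 802.1Q tag */

/* Return 1 if jumbo can be enabled for this mtu given rxmem bytes. */
static int jumbo_ok(uint32_t mtu, uint32_t rxmem)
{
	uint32_t max_frm_size = mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE;

	return mtu > XAE_MTU && mtu <= XAE_JUMBO_MTU &&
	       max_frm_size <= rxmem;
}

int main(void)
{
	/* 9000 + 18 + 4 = 9022 bytes needed; fits in 16 KiB of Rx memory */
	printf("mtu 9000, rxmem 16384 -> jumbo %d\n", jumbo_ok(9000, 16384));
	printf("mtu 9000, rxmem 8192  -> jumbo %d\n", jumbo_ok(9000, 8192));
	return 0;
}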
@@ -497,7 +500,8 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
- * transmitter disabled.*/
+ * transmitter disabled.
+ */
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
@@ -558,8 +562,8 @@ static void axienet_adjust_link(struct net_device *ndev)
lp->last_link = link_state;
phy_print_status(phy);
} else {
- dev_err(&ndev->dev, "Error setting Axi Ethernet "
- "mac speed\n");
+ netdev_err(ndev,
+ "Error setting Axi Ethernet mac speed\n");
}
}
}
@@ -617,7 +621,7 @@ static void axienet_start_xmit_done(struct net_device *ndev)
* @lp: Pointer to the axienet_local structure
* @num_frag: The number of BDs to check for
*
- * returns: 0, on success
+ * Return: 0, on success
* NETDEV_TX_BUSY, if any of the descriptors are not free
*
* This function is invoked before BDs are allocated and transmission starts.
@@ -640,7 +644,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
* @skb: sk_buff pointer that contains data to be Txed.
* @ndev: Pointer to net_device structure.
*
- * returns: NETDEV_TX_OK, on success
+ * Return: NETDEV_TX_OK, on success
* NETDEV_TX_BUSY, if any of the descriptors are not free
*
* This function is invoked from upper layers to initiate transmission. The
@@ -726,15 +730,15 @@ static void axienet_recv(struct net_device *ndev)
u32 csumstatus;
u32 size = 0;
u32 packets = 0;
- dma_addr_t tail_p;
+ dma_addr_t tail_p = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
struct axidma_bd *cur_p;
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
skb = (struct sk_buff *) (cur_p->sw_id_offset);
length = cur_p->app4 & 0x0000FFFF;
@@ -786,7 +790,8 @@ static void axienet_recv(struct net_device *ndev)
ndev->stats.rx_packets += packets;
ndev->stats.rx_bytes += size;
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ if (tail_p)
+ axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
/**
@@ -794,7 +799,7 @@ static void axienet_recv(struct net_device *ndev)
* @irq: irq number
* @_ndev: net_device pointer
*
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
*
* This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
* to complete the BD processing.
@@ -808,6 +813,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
axienet_start_xmit_done(lp->ndev);
goto out;
}
@@ -831,9 +837,9 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&lp->dma_err_tasklet);
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
out:
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
return IRQ_HANDLED;
}
@@ -842,7 +848,7 @@ out:
* @irq: irq number
* @_ndev: net_device pointer
*
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
*
* This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
* processing.
@@ -856,6 +862,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
axienet_recv(lp->ndev);
goto out;
}
@@ -879,9 +886,9 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&lp->dma_err_tasklet);
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
out:
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
return IRQ_HANDLED;
}
@@ -891,7 +898,7 @@ static void axienet_dma_err_handler(unsigned long data);
* axienet_open - Driver open routine.
* @ndev: Pointer to net_device structure
*
- * returns: 0, on success.
+ * Return: 0, on success.
* -ENODEV, if PHY cannot be connected to
* non-zero error value on failure
*
@@ -914,7 +921,8 @@ static int axienet_open(struct net_device *ndev)
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. If MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards. */
+ * process is started, MDIO will be broken afterwards.
+ */
axienet_iow(lp, XAE_MDIO_MC_OFFSET,
(mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
axienet_device_reset(ndev);
@@ -925,14 +933,20 @@ static int axienet_open(struct net_device *ndev)
return ret;
if (lp->phy_node) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ if (lp->phy_type == XAE_PHY_TYPE_GMII) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
axienet_adjust_link, 0,
PHY_INTERFACE_MODE_GMII);
- if (!lp->phy_dev) {
- dev_err(lp->dev, "of_phy_connect() failed\n");
- return -ENODEV;
+ } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII_ID);
}
- phy_start(lp->phy_dev);
+
+ if (!lp->phy_dev)
+ dev_err(lp->dev, "of_phy_connect() failed\n");
+ else
+ phy_start(lp->phy_dev);
}
/* Enable tasklets for Axi DMA error handling */
@@ -965,7 +979,7 @@ err_tx_irq:
* axienet_stop - Driver stop routine.
* @ndev: Pointer to net_device structure
*
- * returns: 0, on success.
+ * Return: 0, on success.
*
* This is the driver stop routine. It calls phy_disconnect to stop the PHY
* device. It also removes the interrupt handlers and disables the interrupts.
@@ -1005,7 +1019,7 @@ static int axienet_stop(struct net_device *ndev)
* @ndev: Pointer to net_device structure
* @new_mtu: New mtu value to be applied
*
- * returns: Always returns 0 (success).
+ * Return: Always returns 0 (success).
*
* This is the change mtu driver routine. It checks if the Axi Ethernet
* hardware supports jumbo frames before changing the mtu. This can be
@@ -1017,15 +1031,15 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- if (lp->jumbo_support) {
- if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
- return -EINVAL;
- ndev->mtu = new_mtu;
- } else {
- if ((new_mtu > XAE_MTU) || (new_mtu < 64))
- return -EINVAL;
- ndev->mtu = new_mtu;
- }
+
+ if ((new_mtu + VLAN_ETH_HLEN +
+ XAE_TRL_SIZE) > lp->rxmem)
+ return -EINVAL;
+
+ if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
+ return -EINVAL;
+
+ ndev->mtu = new_mtu;
return 0;
}
@@ -1072,6 +1086,8 @@ static const struct net_device_ops axienet_netdev_ops = {
* not be found, the function returns -ENODEV. This function calls the
* relevant PHY ethtool API to get the PHY settings.
* Issue "ethtool ethX" under linux prompt to execute this function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
*/
static int axienet_ethtools_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
@@ -1093,6 +1109,8 @@ static int axienet_ethtools_get_settings(struct net_device *ndev,
* relevant PHY ethtool API to set the PHY.
* Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
* function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
*/
static int axienet_ethtools_set_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
@@ -1127,6 +1145,8 @@ static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
*
* This implements ethtool command for getting the total register length
* information.
+ *
+ * Return: the total regs length
*/
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
@@ -1213,11 +1233,13 @@ axienet_ethtools_get_pauseparam(struct net_device *ndev,
* axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
* settings.
* @ndev: Pointer to net_device structure
- * @epauseparam:Pointer to ethtool_pauseparam structure
+ * @epauseparm: Pointer to ethtool_pauseparam structure
*
* This implements ethtool command for enabling flow control on Rx and Tx
* paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
* function.
+ *
+ * Return: 0 on success, -EFAULT if device is running
*/
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
@@ -1227,8 +1249,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- printk(KERN_ERR "%s: Please stop netif before applying "
- "configruation\n", ndev->name);
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
return -EFAULT;
}
@@ -1254,6 +1276,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
* This implements ethtool command for getting the DMA interrupt coalescing
* count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
* execute this function.
+ *
+ * Return: 0 always
*/
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *ecoalesce)
@@ -1277,6 +1301,8 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev,
* This implements ethtool command for setting the DMA interrupt coalescing
* count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
* prompt to execute this function.
+ *
+ * Return: 0, on success, Non-zero error value on failure.
*/
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *ecoalesce)
@@ -1284,8 +1310,8 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- printk(KERN_ERR "%s: Please stop netif before applying "
- "configruation\n", ndev->name);
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
return -EFAULT;
}
@@ -1354,7 +1380,8 @@ static void axienet_dma_err_handler(unsigned long data)
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. So if MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards. */
+ * process is started, MDIO will be broken afterwards.
+ */
axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
~XAE_MDIO_MC_MDIOEN_MASK));
@@ -1425,7 +1452,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.*/
+ * halted state. This will make the Rx side ready for reception.
+ */
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -1435,7 +1463,8 @@ static void axienet_dma_err_handler(unsigned long data)
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting */
+ * tail pointer register that the Tx channel will start transmitting
+ */
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -1451,7 +1480,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
- * transmitter disabled.*/
+ * transmitter disabled.
+ */
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
@@ -1460,11 +1490,10 @@ static void axienet_dma_err_handler(unsigned long data)
}
/**
- * axienet_of_probe - Axi Ethernet probe function.
- * @op: Pointer to platform device structure.
- * @match: Pointer to device id structure
+ * axienet_probe - Axi Ethernet probe function.
+ * @pdev: Pointer to platform device structure.
*
- * returns: 0, on success
+ * Return: 0, on success
* Non-zero error value on failure.
*
* This is the probe routine for Axi Ethernet driver. This is called before
@@ -1472,22 +1501,23 @@ static void axienet_dma_err_handler(unsigned long data)
* device. Parses through device tree and populates fields of
* axienet_local. It registers the Ethernet device.
*/
-static int axienet_of_probe(struct platform_device *op)
+static int axienet_probe(struct platform_device *pdev)
{
- __be32 *p;
- int size, ret = 0;
+ int ret;
struct device_node *np;
struct axienet_local *lp;
struct net_device *ndev;
- const void *addr;
+ u8 mac_addr[6];
+ struct resource *ethres, dmares;
+ u32 value;
ndev = alloc_etherdev(sizeof(*lp));
if (!ndev)
return -ENOMEM;
- platform_set_drvdata(op, ndev);
+ platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &op->dev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->netdev_ops = &axienet_netdev_ops;
@@ -1495,21 +1525,23 @@ static int axienet_of_probe(struct platform_device *op)
lp = netdev_priv(ndev);
lp->ndev = ndev;
- lp->dev = &op->dev;
+ lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
/* Map device registers */
- lp->regs = of_iomap(op->dev.of_node, 0);
+ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
- if (!lp->regs) {
+ if (IS_ERR(lp->regs)) {
- dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+ dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(lp->regs);
- goto nodev;
+ goto free_netdev;
}
+
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
- if (p) {
- switch (be32_to_cpup(p)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
+ if (!ret) {
+ switch (value) {
case 1:
lp->csum_offload_on_tx_path =
XAE_FEATURE_PARTIAL_TX_CSUM;
@@ -1528,9 +1560,9 @@ static int axienet_of_probe(struct platform_device *op)
lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
}
}
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
- if (p) {
- switch (be32_to_cpup(p)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
+ if (!ret) {
+ switch (value) {
case 1:
lp->csum_offload_on_rx_path =
XAE_FEATURE_PARTIAL_RX_CSUM;
@@ -1546,82 +1578,77 @@ static int axienet_of_probe(struct platform_device *op)
}
}
/* For supporting jumbo frames, the Axi Ethernet hardware must have
- * a larger Rx/Tx Memory. Typically, the size must be more than or
- * equal to 16384 bytes, so that we can enable jumbo option and start
- * supporting jumbo frames. Here we check for memory allocated for
- * Rx/Tx in the hardware from the device-tree and accordingly set
- * flags. */
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
- if (p) {
- if ((be32_to_cpup(p)) >= 0x4000)
- lp->jumbo_support = 1;
- }
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
- if (p)
- lp->phy_type = be32_to_cpup(p);
+ * a larger Rx/Tx memory. The size must be large enough to hold a
+ * maximum-sized (jumbo) frame before the jumbo option can be enabled.
+ * Here we read the Rx/Tx memory size provisioned in the hardware from
+ * the device tree and set the flags accordingly.
+ */
+ of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
+ of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&op->dev, "could not find DMA node\n");
- ret = -ENODEV;
- goto err_iounmap;
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "could not find DMA node\n");
+ ret = -ENODEV;
+ goto free_netdev;
}
- lp->dma_regs = of_iomap(np, 0);
- if (lp->dma_regs) {
- dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
- } else {
- dev_err(&op->dev, "unable to map DMA registers\n");
- of_node_put(np);
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get DMA resource\n");
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+ if (IS_ERR(lp->dma_regs)) {
+ dev_err(&pdev->dev, "could not map DMA regs\n");
+ ret = PTR_ERR(lp->dma_regs);
+ goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&op->dev, "could not determine irqs\n");
+ dev_err(&pdev->dev, "could not determine irqs\n");
ret = -ENOMEM;
- goto err_iounmap_2;
+ goto free_netdev;
}
/* Retrieve the MAC address */
- addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
- if ((!addr) || (size != 6)) {
- dev_err(&op->dev, "could not find MAC address\n");
- ret = -ENODEV;
- goto err_iounmap_2;
+ ret = of_property_read_u8_array(pdev->dev.of_node,
+ "local-mac-address", mac_addr, 6);
+ if (ret) {
+ dev_err(&pdev->dev, "could not find MAC address\n");
+ goto free_netdev;
}
- axienet_set_mac_address(ndev, (void *) addr);
+ axienet_set_mac_address(ndev, (void *)mac_addr);
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
- ret = axienet_mdio_setup(lp, op->dev.of_node);
- if (ret)
- dev_warn(&op->dev, "error registering MDIO bus\n");
+ lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (lp->phy_node) {
+ ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+ if (ret)
+ dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ }
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
- goto err_iounmap_2;
+ goto free_netdev;
}
return 0;
-err_iounmap_2:
- if (lp->dma_regs)
- iounmap(lp->dma_regs);
-err_iounmap:
- iounmap(lp->regs);
-nodev:
+free_netdev:
free_netdev(ndev);
- ndev = NULL;
+
return ret;
}
-static int axienet_of_remove(struct platform_device *op)
+static int axienet_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(op);
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
axienet_mdio_teardown(lp);
@@ -1630,24 +1657,21 @@ static int axienet_of_remove(struct platform_device *op)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
- iounmap(lp->regs);
- if (lp->dma_regs)
- iounmap(lp->dma_regs);
free_netdev(ndev);
return 0;
}
-static struct platform_driver axienet_of_driver = {
- .probe = axienet_of_probe,
- .remove = axienet_of_remove,
+static struct platform_driver axienet_driver = {
+ .probe = axienet_probe,
+ .remove = axienet_remove,
.driver = {
.name = "xilinx_axienet",
.of_match_table = axienet_of_match,
},
};
-module_platform_driver(axienet_of_driver);
+module_platform_driver(axienet_driver);
MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 3b67d60d4378..2a5a16834c01 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -37,7 +37,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
* @phy_id: Address of the PHY device
* @reg: PHY register to read
*
- * returns: The register contents on success, -ETIMEDOUT on a timeout
+ * Return: The register contents on success, -ETIMEDOUT on a timeout
*
* Reads the contents of the requested register from the requested PHY
* address by first writing the details into MCR register. After a while
@@ -80,7 +80,7 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* @reg: PHY register to write to
* @val: Value to be written into the register
*
- * returns: 0 on success, -ETIMEDOUT on a timeout
+ * Return: 0 on success, -ETIMEDOUT on a timeout
*
* Writes the value to the requested register by first writing the value
* into the MWD register. The MCR register is then appropriately set up
@@ -119,7 +119,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
* @lp: Pointer to axienet local data structure.
* @np: Pointer to device node
*
- * returns: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
* mdiobus_alloc (to allocate memory for mii bus structure) fails.
*
* Sets up the MDIO interface by initializing the MDIO clock and enabling the
@@ -161,19 +161,19 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
np1 = of_find_node_by_name(NULL, "cpu");
if (!np1) {
- printk(KERN_WARNING "%s(): Could not find CPU device node.",
- __func__);
- printk(KERN_WARNING "Setting MDIO clock divisor to "
- "default %d\n", DEFAULT_CLOCK_DIVISOR);
+ netdev_warn(lp->ndev, "Could not find CPU device node.\n");
+ netdev_warn(lp->ndev,
+ "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR;
goto issue;
}
property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
if (!property_p) {
- printk(KERN_WARNING "%s(): Could not find CPU property: "
- "clock-frequency.", __func__);
- printk(KERN_WARNING "Setting MDIO clock divisor to "
- "default %d\n", DEFAULT_CLOCK_DIVISOR);
+ netdev_warn(lp->ndev, "clock-frequency property not found.\n");
+ netdev_warn(lp->ndev,
+ "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR;
of_node_put(np1);
goto issue;
@@ -183,12 +183,14 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add
- * 1 to the clock divisor or we will surely be above 2.5 MHz */
+ * 1 to the clock divisor or we will surely be above 2.5 MHz
+ */
if (host_clock % (MAX_MDIO_FREQ * 2))
clk_div++;
- printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based "
- "on %u Hz host clock.\n", __func__, clk_div, host_clock);
+ netdev_dbg(lp->ndev,
+ "Setting MDIO clock divisor to %u/%u Hz host clock.\n",
+ clk_div, host_clock);
of_node_put(np1);
issue:
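The divisor computation above keeps the MDIO clock at or below MAX_MDIO_FREQ (2.5 MHz): fMDIO = fHOST / ((clk_div + 1) * 2), so clk_div = fHOST / (2.5 MHz * 2) - 1, rounded up whenever the division leaves a remainder. A worked standalone example of that arithmetic (values in main() are illustrative host clocks):

#include <stdint.h>
#include <stdio.h>

#define MAX_MDIO_FREQ 2500000u   /* 2.5 MHz MDIO ceiling */

static uint32_t mdio_clk_div(uint32_t host_clock)
{
	uint32_t clk_div = host_clock / (MAX_MDIO_FREQ * 2) - 1;

	/* Any remainder means truncation rounded the divisor down, which
	 * would push fMDIO above 2.5 MHz; bump the divisor by one. */
	if (host_clock % (MAX_MDIO_FREQ * 2))
		clk_div++;
	return clk_div;
}

int main(void)
{
	/* 125 MHz / (2.5 MHz * 2) = 25 exactly -> clk_div = 24 */
	printf("125 MHz -> %u\n", mdio_clk_div(125000000));
	/* 133 MHz / 5 MHz = 26 remainder 3 MHz -> 25 + 1 = 26 */
	printf("133 MHz -> %u\n", mdio_clk_div(133000000));
	return 0;
}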
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index cc27dea3414e..9956680402de 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
- DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
+ DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ;
DB_SMT("SRF: state SR%d Threshold %d\n",
smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
#ifdef DEBUG
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
new file mode 100644
index 000000000000..78d49d186e05
--- /dev/null
+++ b/drivers/net/geneve.c
@@ -0,0 +1,523 @@
+/*
+ * GENEVE: Generic Network Virtualization Encapsulation
+ *
+ * Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hash.h>
+#include <net/rtnetlink.h>
+#include <net/geneve.h>
+
+#define GENEVE_NETDEV_VER "0.6"
+
+#define GENEVE_UDP_PORT 6081
+
+#define GENEVE_N_VID (1u << 24)
+#define GENEVE_VID_MASK (GENEVE_N_VID - 1)
+
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+/* per-network namespace private data for this module */
+struct geneve_net {
+ struct list_head geneve_list;
+ struct hlist_head vni_list[VNI_HASH_SIZE];
+};
+
+/* Pseudo network device */
+struct geneve_dev {
+ struct hlist_node hlist; /* vni hash table */
+ struct net *net; /* netns for packet i/o */
+ struct net_device *dev; /* netdev for geneve tunnel */
+ struct geneve_sock *sock; /* socket used for geneve tunnel */
+ u8 vni[3]; /* virtual network ID for tunnel */
+ u8 ttl; /* TTL override */
+ u8 tos; /* TOS override */
+ struct sockaddr_in remote; /* IPv4 address for link partner */
+ struct list_head next; /* geneve's per namespace list */
+};
+
+static int geneve_net_id;
+
+static inline __u32 geneve_net_vni_hash(u8 vni[3])
+{
+ __u32 vnid;
+
+ vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
+ return hash_32(vnid, VNI_HASH_BITS);
+}
+
+/* geneve receive/decap routine */
+static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
+{
+ struct genevehdr *gnvh = geneve_hdr(skb);
+ struct geneve_dev *dummy, *geneve = NULL;
+ struct geneve_net *gn;
+ struct iphdr *iph = NULL;
+ struct pcpu_sw_netstats *stats;
+ struct hlist_head *vni_list_head;
+ int err = 0;
+ __u32 hash;
+
+ iph = ip_hdr(skb); /* Still outer IP header... */
+
+ gn = gs->rcv_data;
+
+ /* Find the device for this VNI */
+ hash = geneve_net_vni_hash(gnvh->vni);
+ vni_list_head = &gn->vni_list[hash];
+ hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
+ if (!memcmp(gnvh->vni, dummy->vni, sizeof(dummy->vni)) &&
+ iph->saddr == dummy->remote.sin_addr.s_addr) {
+ geneve = dummy;
+ break;
+ }
+ }
+ if (!geneve)
+ goto drop;
+
+ /* Drop packets w/ critical options,
+ * since we don't support any...
+ */
+ if (gnvh->critical)
+ goto drop;
+
+ skb_reset_mac_header(skb);
+ skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
+ skb->protocol = eth_type_trans(skb, geneve->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+ /* Ignore packet loops (and multicast echo) */
+ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+ goto drop;
+
+ skb_reset_network_header(skb);
+
+ iph = ip_hdr(skb); /* Now inner IP header... */
+ err = IP_ECN_decapsulate(iph, skb);
+
+ if (unlikely(err)) {
+ if (log_ecn_error)
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ &iph->saddr, iph->tos);
+ if (err > 1) {
+ ++geneve->dev->stats.rx_frame_errors;
+ ++geneve->dev->stats.rx_errors;
+ goto drop;
+ }
+ }
+
+ stats = this_cpu_ptr(geneve->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ netif_rx(skb);
+
+ return;
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+}
+
+/* Setup stats when device is created */
+static int geneve_init(struct net_device *dev)
+{
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void geneve_uninit(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+}
+
+static int geneve_open(struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct net *net = geneve->net;
+ struct geneve_net *gn = net_generic(geneve->net, geneve_net_id);
+ struct geneve_sock *gs;
+
+ gs = geneve_sock_add(net, htons(GENEVE_UDP_PORT), geneve_rx, gn,
+ false, false);
+ if (IS_ERR(gs))
+ return PTR_ERR(gs);
+
+ geneve->sock = gs;
+
+ return 0;
+}
+
+static int geneve_stop(struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct geneve_sock *gs = geneve->sock;
+
+ geneve_sock_release(gs);
+
+ return 0;
+}
+
+static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct geneve_sock *gs = geneve->sock;
+ struct rtable *rt = NULL;
+ const struct iphdr *iip; /* interior IP header */
+ struct flowi4 fl4;
+ int err;
+ __be16 sport;
+ __u8 tos, ttl;
+
+ iip = ip_hdr(skb);
+
+ skb_reset_mac_header(skb);
+
+ /* TODO: port min/max limits should be configurable */
+ sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true);
+
+ tos = geneve->tos;
+ if (tos == 1)
+ tos = ip_tunnel_get_dsfield(iip, skb);
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_tos = RT_TOS(tos);
+ fl4.daddr = geneve->remote.sin_addr.s_addr;
+ rt = ip_route_output_key(geneve->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
+ if (rt->dst.dev == dev) { /* is this necessary? */
+ netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
+ dev->stats.collisions++;
+ goto rt_tx_error;
+ }
+
+ tos = ip_tunnel_ecn_encap(tos, iip, skb);
+
+ ttl = geneve->ttl;
+ if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
+ ttl = 1;
+
+ ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+ /* no need to handle local destination and encap bypass...yet... */
+
+ err = geneve_xmit_skb(gs, rt, skb, fl4.saddr, fl4.daddr,
+ tos, ttl, 0, sport, htons(GENEVE_UDP_PORT), 0,
+ geneve->vni, 0, NULL, false,
+ !net_eq(geneve->net, dev_net(geneve->dev)));
+ if (err < 0)
+ ip_rt_put(rt);
+
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+
+ return NETDEV_TX_OK;
+
+rt_tx_error:
+ ip_rt_put(rt);
+tx_error:
+ dev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops geneve_netdev_ops = {
+ .ndo_init = geneve_init,
+ .ndo_uninit = geneve_uninit,
+ .ndo_open = geneve_open,
+ .ndo_stop = geneve_stop,
+ .ndo_start_xmit = geneve_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static void geneve_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
+ strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
+}
+
+static const struct ethtool_ops geneve_ethtool_ops = {
+ .get_drvinfo = geneve_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/* Info for udev, that this is a virtual tunnel endpoint */
+static struct device_type geneve_type = {
+ .name = "geneve",
+};
+
+/* Initialize the device structure. */
+static void geneve_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &geneve_netdev_ops;
+ dev->ethtool_ops = &geneve_ethtool_ops;
+ dev->destructor = free_netdev;
+
+ SET_NETDEV_DEVTYPE(dev, &geneve_type);
+
+ dev->tx_queue_len = 0;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+
+ dev->vlan_features = dev->features;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+
+ netif_keep_dst(dev);
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+}
+
+static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
+ [IFLA_GENEVE_ID] = { .type = NLA_U32 },
+ [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_GENEVE_TTL] = { .type = NLA_U8 },
+ [IFLA_GENEVE_TOS] = { .type = NLA_U8 },
+};
+
+static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+
+ if (!data)
+ return -EINVAL;
+
+ if (data[IFLA_GENEVE_ID]) {
+ __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+
+ if (vni >= GENEVE_VID_MASK)
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int geneve_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+ struct geneve_dev *dummy, *geneve = netdev_priv(dev);
+ struct hlist_head *vni_list_head;
+ struct sockaddr_in remote; /* IPv4 address for link partner */
+ __u32 vni, hash;
+ int err;
+
+ if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
+ return -EINVAL;
+
+ geneve->net = net;
+ geneve->dev = dev;
+
+ vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+ geneve->vni[0] = (vni & 0x00ff0000) >> 16;
+ geneve->vni[1] = (vni & 0x0000ff00) >> 8;
+ geneve->vni[2] = vni & 0x000000ff;
+
+ geneve->remote.sin_addr.s_addr =
+ nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+ if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
+ return -EINVAL;
+
+ remote = geneve->remote;
+ hash = geneve_net_vni_hash(geneve->vni);
+ vni_list_head = &gn->vni_list[hash];
+ hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
+ if (!memcmp(geneve->vni, dummy->vni, sizeof(dummy->vni)) &&
+ !memcmp(&remote, &dummy->remote, sizeof(dummy->remote)))
+ return -EBUSY;
+ }
+
+ if (tb[IFLA_ADDRESS] == NULL)
+ eth_hw_addr_random(dev);
+
+ err = register_netdevice(dev);
+ if (err)
+ return err;
+
+ if (data[IFLA_GENEVE_TTL])
+ geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+
+ if (data[IFLA_GENEVE_TOS])
+ geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+
+ list_add(&geneve->next, &gn->geneve_list);
+
+ hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]);
+
+ return 0;
+}
+
+static void geneve_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+
+ if (!hlist_unhashed(&geneve->hlist))
+ hlist_del_rcu(&geneve->hlist);
+
+ list_del(&geneve->next);
+ unregister_netdevice_queue(dev, head);
+}
+
+static size_t geneve_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */
+ nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
+ 0;
+}
+
+static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ __u32 vni;
+
+ vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
+ if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
+ goto nla_put_failure;
+
+ if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
+ geneve->remote.sin_addr.s_addr))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
+ nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops geneve_link_ops __read_mostly = {
+ .kind = "geneve",
+ .maxtype = IFLA_GENEVE_MAX,
+ .policy = geneve_policy,
+ .priv_size = sizeof(struct geneve_dev),
+ .setup = geneve_setup,
+ .validate = geneve_validate,
+ .newlink = geneve_newlink,
+ .dellink = geneve_dellink,
+ .get_size = geneve_get_size,
+ .fill_info = geneve_fill_info,
+};
+
+static __net_init int geneve_init_net(struct net *net)
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+ unsigned int h;
+
+ INIT_LIST_HEAD(&gn->geneve_list);
+
+ for (h = 0; h < VNI_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&gn->vni_list[h]);
+
+ return 0;
+}
+
+static void __net_exit geneve_exit_net(struct net *net)
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+ struct geneve_dev *geneve, *next;
+ struct net_device *dev, *aux;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+
+ /* gather any geneve devices that were moved into this ns */
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &geneve_link_ops)
+ unregister_netdevice_queue(dev, &list);
+
+ /* now gather any other geneve devices that were created in this ns */
+ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
+ /* If geneve->dev is in the same netns, it was already added
+ * to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(geneve->dev), net))
+ unregister_netdevice_queue(geneve->dev, &list);
+ }
+
+ /* unregister the devices gathered above */
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations geneve_net_ops = {
+ .init = geneve_init_net,
+ .exit = geneve_exit_net,
+ .id = &geneve_net_id,
+ .size = sizeof(struct geneve_net),
+};
+
+static int __init geneve_init_module(void)
+{
+ int rc;
+
+ rc = register_pernet_subsys(&geneve_net_ops);
+ if (rc)
+ goto out1;
+
+ rc = rtnl_link_register(&geneve_link_ops);
+ if (rc)
+ goto out2;
+
+ return 0;
+out2:
+ unregister_pernet_subsys(&geneve_net_ops);
+out1:
+ return rc;
+}
+late_initcall(geneve_init_module);
+
+static void __exit geneve_cleanup_module(void)
+{
+ rtnl_link_unregister(&geneve_link_ops);
+ unregister_pernet_subsys(&geneve_net_ops);
+}
+module_exit(geneve_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(GENEVE_NETDEV_VER);
+MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
+MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("geneve");
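geneve_newlink() and geneve_fill_info() above round-trip the 24-bit VNI between a u32 netlink attribute and the driver's 3-byte field, and geneve_net_vni_hash() then hashes the reassembled value into one of 2^VNI_HASH_BITS buckets. A standalone sketch of that pack/unpack convention (the hash below uses an illustrative multiplicative constant, not necessarily the kernel's hash_32() implementation):

#include <stdint.h>
#include <stdio.h>

#define VNI_HASH_BITS 10

static void vni_pack(uint32_t vni, uint8_t out[3])
{
	out[0] = (vni & 0x00ff0000) >> 16;
	out[1] = (vni & 0x0000ff00) >> 8;
	out[2] =  vni & 0x000000ff;
}

static uint32_t vni_unpack(const uint8_t in[3])
{
	return (in[0] << 16) | (in[1] << 8) | in[2];
}

/* Multiplicative hash into 2^VNI_HASH_BITS buckets (illustrative constant). */
static uint32_t vni_hash(const uint8_t vni[3])
{
	return (vni_unpack(vni) * 0x61C88647u) >> (32 - VNI_HASH_BITS);
}

int main(void)
{
	uint8_t vni[3];

	vni_pack(5001, vni);
	printf("vni=%u bucket=%u\n", vni_unpack(vni), vni_hash(vni));
	return 0;
}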
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 41071d32bc8e..dd4544085db3 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -161,6 +161,7 @@ struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
bool link_state; /* 0 - link up, 1 - link down */
int ring_size;
+ u32 max_num_vrss_chns;
};
enum rndis_device_state {
@@ -611,6 +612,12 @@ struct multi_send_data {
u32 count; /* counter of batched packets */
};
+struct netvsc_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+};
+
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
@@ -618,6 +625,9 @@ struct net_device_context {
struct delayed_work dwork;
struct work_struct work;
u32 msg_enable; /* debug level */
+
+ struct netvsc_stats __percpu *tx_stats;
+ struct netvsc_stats __percpu *rx_stats;
};
/* Per netvsc device */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index ea091bc5ff09..06de98a05622 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -227,13 +227,18 @@ static int netvsc_init_buf(struct hv_device *device)
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
struct net_device *ndev;
+ int node;
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
ndev = net_device->ndev;
- net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+ node = cpu_to_node(device->channel->target_cpu);
+ net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
+ if (!net_device->recv_buf)
+ net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+
if (!net_device->recv_buf) {
netdev_err(ndev, "unable to allocate receive "
"buffer of size %d\n", net_device->recv_buf_size);
@@ -321,7 +326,9 @@ static int netvsc_init_buf(struct hv_device *device)
/* Now setup the send buffer.
*/
- net_device->send_buf = vzalloc(net_device->send_buf_size);
+ net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
+ if (!net_device->send_buf)
+ net_device->send_buf = vzalloc(net_device->send_buf_size);
if (!net_device->send_buf) {
netdev_err(ndev, "unable to allocate send "
"buffer of size %d\n", net_device->send_buf_size);
@@ -743,6 +750,7 @@ static inline int netvsc_send_pkt(
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
+ u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (packet->is_data_pkt) {
@@ -769,32 +777,42 @@ static inline int netvsc_send_pkt(
if (out_channel->rescind)
return -ENODEV;
+ /*
+ * It is possible that once we successfully place this packet
+ * on the ringbuffer, we may stop the queue. In that case, we want
+ * to notify the host independent of the xmit_more flag. We don't
+ * need to be precise here; in the worst case we may signal the host
+ * unnecessarily.
+ */
+ if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
+ packet->xmit_more = false;
+
if (packet->page_buf_cnt) {
pgbuf = packet->cp_partial ? packet->page_buf +
packet->rmsg_pgcnt : packet->page_buf;
- ret = vmbus_sendpacket_pagebuffer(out_channel,
- pgbuf,
- packet->page_buf_cnt,
- &nvmsg,
- sizeof(struct nvsp_message),
- req_id);
+ ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
+ pgbuf,
+ packet->page_buf_cnt,
+ &nvmsg,
+ sizeof(struct nvsp_message),
+ req_id,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+ !packet->xmit_more);
} else {
- ret = vmbus_sendpacket(
- out_channel, &nvmsg,
- sizeof(struct nvsp_message),
- req_id,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
+ sizeof(struct nvsp_message),
+ req_id,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+ !packet->xmit_more);
}
if (ret == 0) {
atomic_inc(&net_device->num_outstanding_sends);
atomic_inc(&net_device->queue_sends[q_idx]);
- if (hv_ringbuf_avail_percent(&out_channel->outbound) <
- RING_AVAIL_PERCENT_LOWATER) {
- netif_tx_stop_queue(netdev_get_tx_queue(
- ndev, q_idx));
+ if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+ netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
if (atomic_read(&net_device->
queue_sends[q_idx]) < 1)
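The netvsc change above computes ring_avail once, before the send: if free space is already at or below the low watermark (plus one percentage point of slack), batching is disabled for this packet (xmit_more forced off) so the host is signalled even though the queue is about to be stopped; as the comment notes, the worst case is one unnecessary signal. A hedged userspace sketch of that pattern (the ring structure and the watermark constant are illustrative stand-ins, not VMBus APIs):

#include <stdbool.h>
#include <stdio.h>

#define LOWATER_PERCENT 10   /* stop the queue below this free percentage */

struct ring { unsigned int size, used; };

static unsigned int avail_percent(const struct ring *r)
{
	return 100u * (r->size - r->used) / r->size;
}

/* Decide whether this send may defer the host doorbell (batching). */
static bool may_batch(const struct ring *r, bool xmit_more)
{
	/* If we may stop the queue after this send, always signal the host
	 * now; at worst we signal once more than strictly necessary. */
	if (avail_percent(r) < LOWATER_PERCENT + 1)
		return false;
	return xmit_more;
}

int main(void)
{
	struct ring r = { .size = 100, .used = 92 };   /* 8% free */

	printf("batch allowed: %d\n", may_batch(&r, true));  /* prints 0 */
	return 0;
}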
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5993c7e2d723..358475ed9b59 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,6 +46,8 @@ static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+static int max_num_vrss_chns = 8;
+
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -196,12 +198,12 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
struct flow_keys flow;
int data_len;
- if (!skb_flow_dissect(skb, &flow) ||
- !(flow.n_proto == htons(ETH_P_IP) ||
- flow.n_proto == htons(ETH_P_IPV6)))
+ if (!skb_flow_dissect_flow_keys(skb, &flow) ||
+ !(flow.basic.n_proto == htons(ETH_P_IP) ||
+ flow.basic.n_proto == htons(ETH_P_IPV6)))
return false;
- if (flow.ip_proto == IPPROTO_TCP)
+ if (flow.basic.ip_proto == IPPROTO_TCP)
data_len = 12;
else
data_len = 8;
@@ -391,7 +393,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
u32 skb_length;
u32 pkt_sz;
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
-
+ struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
/* We will need at most two pages to describe the rndis
* header. We can only transmit MAX_PAGE_BUFFER_COUNT number
@@ -580,8 +582,10 @@ do_send:
drop:
if (ret == 0) {
- net->stats.tx_bytes += skb_length;
- net->stats.tx_packets++;
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->packets++;
+ tx_stats->bytes += skb_length;
+ u64_stats_update_end(&tx_stats->syncp);
} else {
if (ret != -EAGAIN) {
dev_kfree_skb_any(skb);
@@ -644,13 +648,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct ndis_tcp_ip_checksum_info *csum_info)
{
struct net_device *net;
+ struct net_device_context *net_device_ctx;
struct sk_buff *skb;
+ struct netvsc_stats *rx_stats;
net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
if (!net || net->reg_state != NETREG_REGISTERED) {
packet->status = NVSP_STAT_FAIL;
return 0;
}
+ net_device_ctx = netdev_priv(net);
+ rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
/* Allocate a skb - TODO direct I/O to pages? */
skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
@@ -686,8 +694,10 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb_record_rx_queue(skb, packet->channel->
offermsg.offer.sub_channel_index);
- net->stats.rx_packets++;
- net->stats.rx_bytes += packet->total_data_buflen;
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += packet->total_data_buflen;
+ u64_stats_update_end(&rx_stats->syncp);
/*
* Pass the skb back up. Network stack will deallocate the skb when it
@@ -747,12 +757,53 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
ndevctx->device_ctx = hdev;
hv_set_drvdata(hdev, ndev);
device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = max_num_vrss_chns;
rndis_filter_device_add(hdev, &device_info);
netif_tx_wake_all_queues(ndev);
return 0;
}
+static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
+ struct rtnl_link_stats64 *t)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
+ cpu);
+ struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
+ cpu);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ tx_packets = tx_stats->packets;
+ tx_bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ rx_packets = rx_stats->packets;
+ rx_bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+
+ t->tx_bytes += tx_bytes;
+ t->tx_packets += tx_packets;
+ t->rx_bytes += rx_bytes;
+ t->rx_packets += rx_packets;
+ }
+
+ t->tx_dropped = net->stats.tx_dropped;
+ t->tx_errors = net->stats.tx_errors;
+
+ t->rx_dropped = net->stats.rx_dropped;
+ t->rx_errors = net->stats.rx_errors;
+
+ return t;
+}
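
netvsc_get_stats64() folds the new per-CPU counters into one rtnl_link_stats64; the begin/retry pair is what lets 64-bit counters be read without locks on 32-bit kernels. A condensed sketch of the reader side, reusing the patch's struct netvsc_stats (the helper name is illustrative):

static void netvsc_stats_snapshot(const struct netvsc_stats *s,
				  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;	/* torn reads are retried */
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
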
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
@@ -804,6 +855,7 @@ static const struct net_device_ops device_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
.ndo_select_queue = netvsc_select_queue,
+ .ndo_get_stats64 = netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = netvsc_poll_controller,
#endif
@@ -855,6 +907,14 @@ static void netvsc_link_change(struct work_struct *w)
netdev_notify_peers(net);
}
+static void netvsc_free_netdev(struct net_device *netdev)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(netdev);
+
+ free_percpu(net_device_ctx->tx_stats);
+ free_percpu(net_device_ctx->rx_stats);
+ free_netdev(netdev);
+}
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
@@ -883,6 +943,18 @@ static int netvsc_probe(struct hv_device *dev,
netdev_dbg(net, "netvsc msg_enable: %d\n",
net_device_ctx->msg_enable);
+ net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+ if (!net_device_ctx->tx_stats) {
+ free_netdev(net);
+ return -ENOMEM;
+ }
+ net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+ if (!net_device_ctx->rx_stats) {
+ free_percpu(net_device_ctx->tx_stats);
+ free_netdev(net);
+ return -ENOMEM;
+ }
+
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
@@ -906,10 +978,11 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = max_num_vrss_chns;
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- free_netdev(net);
+ netvsc_free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
}
@@ -923,7 +996,7 @@ static int netvsc_probe(struct hv_device *dev,
if (ret != 0) {
pr_err("Unable to register netdev.\n");
rndis_filter_device_remove(dev);
- free_netdev(net);
+ netvsc_free_netdev(net);
} else {
schedule_delayed_work(&net_device_ctx->dwork, 0);
}
@@ -962,7 +1035,7 @@ static int netvsc_remove(struct hv_device *dev)
*/
rndis_filter_device_remove(dev);
- free_netdev(net);
+ netvsc_free_netdev(net);
return 0;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9118cea91882..006c1b8c2385 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1013,6 +1013,9 @@ int rndis_filter_device_add(struct hv_device *dev,
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
+ u32 num_rss_qs;
+ const struct cpumask *node_cpu_mask;
+ u32 num_possible_rss_qs;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -1100,9 +1103,18 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret || rsscap.num_recv_que < 2)
goto out;
+ num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+
net_device->max_chn = rsscap.num_recv_que;
- net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
- num_online_cpus() : rsscap.num_recv_que;
+
+ /*
+ * We will limit the VRSS channels to the number of CPUs in the NUMA node
+ * the primary channel is currently bound to.
+ */
+ node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
+ num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+ net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
if (net_device->num_chn == 1)
goto out;
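
The resulting channel count is the minimum of three values: what the host offers (rsscap.num_recv_que), the module limit (max_num_vrss_chns), and the CPU count of the NUMA node hosting the primary channel. A sketch of the sizing, with only the helper name invented:

static u32 netvsc_num_chn(u32 host_queues, u32 module_limit, int primary_cpu)
{
	/* CPUs in the NUMA node the primary channel is bound to */
	u32 node_cpus = cpumask_weight(cpumask_of_node(cpu_to_node(primary_cpu)));

	return min3(host_queues, module_limit, node_cpus);
}
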
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 1a3c3e57aa0b..1dd5ab8e5054 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -53,3 +53,13 @@ config IEEE802154_CC2520
This driver can also be built as a module. To do so, say M here.
The module will be called 'cc2520'.
+
+config IEEE802154_ATUSB
+ tristate "ATUSB transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154 && USB
+ ---help---
+ Say Y here to enable the ATUSB IEEE 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so say M here.
+ The module will be called 'atusb'.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index d77fa4d77e27..cf1d2a6db023 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
+obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 67d00fbc2e0e..2f25a5ed8247 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -35,6 +35,8 @@
#include <net/mac802154.h>
#include <net/cfg802154.h>
+#include "at86rf230.h"
+
struct at86rf230_local;
/* at86rf2xx chip depend data.
* All timings are in us.
@@ -50,7 +52,7 @@ struct at86rf2xx_chip_data {
int rssi_base_val;
int (*set_channel)(struct at86rf230_local *, u8, u8);
- int (*get_desense_steps)(struct at86rf230_local *, s32);
+ int (*set_txpower)(struct at86rf230_local *, s32);
};
#define AT86RF2XX_MAX_BUF (127 + 3)
@@ -102,200 +104,6 @@ struct at86rf230_local {
struct at86rf230_state_change tx;
};
-#define RG_TRX_STATUS (0x01)
-#define SR_TRX_STATUS 0x01, 0x1f, 0
-#define SR_RESERVED_01_3 0x01, 0x20, 5
-#define SR_CCA_STATUS 0x01, 0x40, 6
-#define SR_CCA_DONE 0x01, 0x80, 7
-#define RG_TRX_STATE (0x02)
-#define SR_TRX_CMD 0x02, 0x1f, 0
-#define SR_TRAC_STATUS 0x02, 0xe0, 5
-#define RG_TRX_CTRL_0 (0x03)
-#define SR_CLKM_CTRL 0x03, 0x07, 0
-#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
-#define SR_PAD_IO_CLKM 0x03, 0x30, 4
-#define SR_PAD_IO 0x03, 0xc0, 6
-#define RG_TRX_CTRL_1 (0x04)
-#define SR_IRQ_POLARITY 0x04, 0x01, 0
-#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
-#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
-#define SR_RX_BL_CTRL 0x04, 0x10, 4
-#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
-#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
-#define SR_PA_EXT_EN 0x04, 0x80, 7
-#define RG_PHY_TX_PWR (0x05)
-#define SR_TX_PWR 0x05, 0x0f, 0
-#define SR_PA_LT 0x05, 0x30, 4
-#define SR_PA_BUF_LT 0x05, 0xc0, 6
-#define RG_PHY_RSSI (0x06)
-#define SR_RSSI 0x06, 0x1f, 0
-#define SR_RND_VALUE 0x06, 0x60, 5
-#define SR_RX_CRC_VALID 0x06, 0x80, 7
-#define RG_PHY_ED_LEVEL (0x07)
-#define SR_ED_LEVEL 0x07, 0xff, 0
-#define RG_PHY_CC_CCA (0x08)
-#define SR_CHANNEL 0x08, 0x1f, 0
-#define SR_CCA_MODE 0x08, 0x60, 5
-#define SR_CCA_REQUEST 0x08, 0x80, 7
-#define RG_CCA_THRES (0x09)
-#define SR_CCA_ED_THRES 0x09, 0x0f, 0
-#define SR_RESERVED_09_1 0x09, 0xf0, 4
-#define RG_RX_CTRL (0x0a)
-#define SR_PDT_THRES 0x0a, 0x0f, 0
-#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
-#define RG_SFD_VALUE (0x0b)
-#define SR_SFD_VALUE 0x0b, 0xff, 0
-#define RG_TRX_CTRL_2 (0x0c)
-#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
-#define SR_SUB_MODE 0x0c, 0x04, 2
-#define SR_BPSK_QPSK 0x0c, 0x08, 3
-#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
-#define SR_RESERVED_0c_5 0x0c, 0x60, 5
-#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
-#define RG_ANT_DIV (0x0d)
-#define SR_ANT_CTRL 0x0d, 0x03, 0
-#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
-#define SR_ANT_DIV_EN 0x0d, 0x08, 3
-#define SR_RESERVED_0d_2 0x0d, 0x70, 4
-#define SR_ANT_SEL 0x0d, 0x80, 7
-#define RG_IRQ_MASK (0x0e)
-#define SR_IRQ_MASK 0x0e, 0xff, 0
-#define RG_IRQ_STATUS (0x0f)
-#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
-#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
-#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
-#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
-#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
-#define SR_IRQ_5_AMI 0x0f, 0x20, 5
-#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
-#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
-#define RG_VREG_CTRL (0x10)
-#define SR_RESERVED_10_6 0x10, 0x03, 0
-#define SR_DVDD_OK 0x10, 0x04, 2
-#define SR_DVREG_EXT 0x10, 0x08, 3
-#define SR_RESERVED_10_3 0x10, 0x30, 4
-#define SR_AVDD_OK 0x10, 0x40, 6
-#define SR_AVREG_EXT 0x10, 0x80, 7
-#define RG_BATMON (0x11)
-#define SR_BATMON_VTH 0x11, 0x0f, 0
-#define SR_BATMON_HR 0x11, 0x10, 4
-#define SR_BATMON_OK 0x11, 0x20, 5
-#define SR_RESERVED_11_1 0x11, 0xc0, 6
-#define RG_XOSC_CTRL (0x12)
-#define SR_XTAL_TRIM 0x12, 0x0f, 0
-#define SR_XTAL_MODE 0x12, 0xf0, 4
-#define RG_RX_SYN (0x15)
-#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
-#define SR_RESERVED_15_2 0x15, 0x70, 4
-#define SR_RX_PDT_DIS 0x15, 0x80, 7
-#define RG_XAH_CTRL_1 (0x17)
-#define SR_RESERVED_17_8 0x17, 0x01, 0
-#define SR_AACK_PROM_MODE 0x17, 0x02, 1
-#define SR_AACK_ACK_TIME 0x17, 0x04, 2
-#define SR_RESERVED_17_5 0x17, 0x08, 3
-#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
-#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
-#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
-#define SR_RESERVED_17_1 0x17, 0x80, 7
-#define RG_FTN_CTRL (0x18)
-#define SR_RESERVED_18_2 0x18, 0x7f, 0
-#define SR_FTN_START 0x18, 0x80, 7
-#define RG_PLL_CF (0x1a)
-#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
-#define SR_PLL_CF_START 0x1a, 0x80, 7
-#define RG_PLL_DCU (0x1b)
-#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
-#define SR_RESERVED_1b_2 0x1b, 0x40, 6
-#define SR_PLL_DCU_START 0x1b, 0x80, 7
-#define RG_PART_NUM (0x1c)
-#define SR_PART_NUM 0x1c, 0xff, 0
-#define RG_VERSION_NUM (0x1d)
-#define SR_VERSION_NUM 0x1d, 0xff, 0
-#define RG_MAN_ID_0 (0x1e)
-#define SR_MAN_ID_0 0x1e, 0xff, 0
-#define RG_MAN_ID_1 (0x1f)
-#define SR_MAN_ID_1 0x1f, 0xff, 0
-#define RG_SHORT_ADDR_0 (0x20)
-#define SR_SHORT_ADDR_0 0x20, 0xff, 0
-#define RG_SHORT_ADDR_1 (0x21)
-#define SR_SHORT_ADDR_1 0x21, 0xff, 0
-#define RG_PAN_ID_0 (0x22)
-#define SR_PAN_ID_0 0x22, 0xff, 0
-#define RG_PAN_ID_1 (0x23)
-#define SR_PAN_ID_1 0x23, 0xff, 0
-#define RG_IEEE_ADDR_0 (0x24)
-#define SR_IEEE_ADDR_0 0x24, 0xff, 0
-#define RG_IEEE_ADDR_1 (0x25)
-#define SR_IEEE_ADDR_1 0x25, 0xff, 0
-#define RG_IEEE_ADDR_2 (0x26)
-#define SR_IEEE_ADDR_2 0x26, 0xff, 0
-#define RG_IEEE_ADDR_3 (0x27)
-#define SR_IEEE_ADDR_3 0x27, 0xff, 0
-#define RG_IEEE_ADDR_4 (0x28)
-#define SR_IEEE_ADDR_4 0x28, 0xff, 0
-#define RG_IEEE_ADDR_5 (0x29)
-#define SR_IEEE_ADDR_5 0x29, 0xff, 0
-#define RG_IEEE_ADDR_6 (0x2a)
-#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
-#define RG_IEEE_ADDR_7 (0x2b)
-#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
-#define RG_XAH_CTRL_0 (0x2c)
-#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
-#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
-#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
-#define RG_CSMA_SEED_0 (0x2d)
-#define SR_CSMA_SEED_0 0x2d, 0xff, 0
-#define RG_CSMA_SEED_1 (0x2e)
-#define SR_CSMA_SEED_1 0x2e, 0x07, 0
-#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
-#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
-#define SR_AACK_SET_PD 0x2e, 0x20, 5
-#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
-#define RG_CSMA_BE (0x2f)
-#define SR_MIN_BE 0x2f, 0x0f, 0
-#define SR_MAX_BE 0x2f, 0xf0, 4
-
-#define CMD_REG 0x80
-#define CMD_REG_MASK 0x3f
-#define CMD_WRITE 0x40
-#define CMD_FB 0x20
-
-#define IRQ_BAT_LOW (1 << 7)
-#define IRQ_TRX_UR (1 << 6)
-#define IRQ_AMI (1 << 5)
-#define IRQ_CCA_ED (1 << 4)
-#define IRQ_TRX_END (1 << 3)
-#define IRQ_RX_START (1 << 2)
-#define IRQ_PLL_UNL (1 << 1)
-#define IRQ_PLL_LOCK (1 << 0)
-
-#define IRQ_ACTIVE_HIGH 0
-#define IRQ_ACTIVE_LOW 1
-
-#define STATE_P_ON 0x00 /* BUSY */
-#define STATE_BUSY_RX 0x01
-#define STATE_BUSY_TX 0x02
-#define STATE_FORCE_TRX_OFF 0x03
-#define STATE_FORCE_TX_ON 0x04 /* IDLE */
-/* 0x05 */ /* INVALID_PARAMETER */
-#define STATE_RX_ON 0x06
-/* 0x07 */ /* SUCCESS */
-#define STATE_TRX_OFF 0x08
-#define STATE_TX_ON 0x09
-/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
-#define STATE_SLEEP 0x0F
-#define STATE_PREP_DEEP_SLEEP 0x10
-#define STATE_BUSY_RX_AACK 0x11
-#define STATE_BUSY_TX_ARET 0x12
-#define STATE_RX_AACK_ON 0x16
-#define STATE_TX_ARET_ON 0x19
-#define STATE_RX_ON_NOCLK 0x1C
-#define STATE_RX_AACK_ON_NOCLK 0x1D
-#define STATE_BUSY_RX_AACK_NOCLK 0x1E
-#define STATE_TRANSITION_IN_PROGRESS 0x1F
-
-#define TRX_STATE_MASK (0x1F)
-
#define AT86RF2XX_NUMREGS 0x3F
static void
@@ -1010,7 +818,7 @@ at86rf230_xmit_start(void *context)
if (lp->is_tx_from_off) {
lp->is_tx_from_off = false;
at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
- at86rf230_xmit_tx_on,
+ at86rf230_write_frame,
false);
} else {
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
@@ -1076,6 +884,50 @@ at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
}
+#define AT86RF2XX_MAX_ED_LEVELS 0xF
+static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+ -7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200,
+ -8000, -7800, -7600, -7400, -7200, -7000,
+};
+
+static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000,
+ -7800, -7600, -7400, -7200, -7000, -6800,
+};
+
+static inline int
+at86rf212_update_cca_ed_level(struct at86rf230_local *lp, int rssi_base_val)
+{
+ unsigned int cca_ed_thres;
+ int rc;
+
+ rc = at86rf230_read_subreg(lp, SR_CCA_ED_THRES, &cca_ed_thres);
+ if (rc < 0)
+ return rc;
+
+ switch (rssi_base_val) {
+ case -98:
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98);
+ lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres];
+ break;
+ case -100:
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
+ lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres];
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
static int
at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
{
@@ -1098,6 +950,10 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
if (rc < 0)
return rc;
+ rc = at86rf212_update_cca_ed_level(lp, lp->data->rssi_base_val);
+ if (rc < 0)
+ return rc;
+
/* This sets the symbol_duration according to the frequency on the 212.
* TODO: move this handling into the set channel and page path in cfg802154.
* We can do that; these timings are according to the 802.15.4 standard.
@@ -1193,23 +1049,56 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
return 0;
}
+#define AT86RF23X_MAX_TX_POWERS 0xF
+static const s32 at86rf233_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+ 400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600,
+ -800, -1200, -1700,
+};
+
+static const s32 at86rf231_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+ 300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700,
+ -900, -1200, -1700,
+};
+
+#define AT86RF212_MAX_TX_POWERS 0x1F
+static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = {
+ 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700,
+ -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700,
+ -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600,
+};
+
+static int
+at86rf23x_set_txpower(struct at86rf230_local *lp, s32 mbm)
+{
+ u32 i;
+
+ for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+ if (lp->hw->phy->supported.tx_powers[i] == mbm)
+ return at86rf230_write_subreg(lp, SR_TX_PWR_23X, i);
+ }
+
+ return -EINVAL;
+}
+
static int
-at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db)
+at86rf212_set_txpower(struct at86rf230_local *lp, s32 mbm)
{
- struct at86rf230_local *lp = hw->priv;
+ u32 i;
- /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
- * bits decrease power in 1dB steps. 0x60 represents extra PA gain of
- * 0dB.
- * thus, supported values for db range from -26 to 5, for 31dB of
- * reduction to 0dB of reduction.
- */
- if (db > 5 || db < -26)
- return -EINVAL;
+ for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+ if (lp->hw->phy->supported.tx_powers[i] == mbm)
+ return at86rf230_write_subreg(lp, SR_TX_PWR_212, i);
+ }
- db = -(db - 5);
+ return -EINVAL;
+}
+
+static int
+at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+ struct at86rf230_local *lp = hw->priv;
- return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+ return lp->data->set_txpower(lp, mbm);
}
static int
@@ -1254,28 +1143,19 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
}
-static int
-at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
- return (level - lp->data->rssi_base_val) * 100 / 207;
-}
-
-static int
-at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
- return (level - lp->data->rssi_base_val) / 2;
-}
static int
-at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level)
+at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
struct at86rf230_local *lp = hw->priv;
+ u32 i;
- if (level < lp->data->rssi_base_val || level > 30)
- return -EINVAL;
+ for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+ if (hw->phy->supported.cca_ed_levels[i] == mbm)
+ return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i);
+ }
- return at86rf230_write_subreg(lp, SR_CCA_ED_THRES,
- lp->data->get_desense_steps(lp, level));
+ return -EINVAL;
}
static int
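
Both new setters follow the same shape: the caller passes an absolute value in mbm (1/100 dBm), and the driver scans the phy's supported table for an exact match, using the matching index as the register value. A generic sketch of the lookup (the function name is illustrative; the tables are the ones registered above):

static int at86rf230_lookup_index(const s32 *table, size_t size, s32 mbm)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i] == mbm)	/* exact match required */
			return i;	/* index doubles as register field */

	return -EINVAL;
}
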
@@ -1365,7 +1245,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_p_ack = 545,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
- .get_desense_steps = at86rf23x_get_desens_steps
+ .set_txpower = at86rf23x_set_txpower,
};
static struct at86rf2xx_chip_data at86rf231_data = {
@@ -1378,7 +1258,7 @@ static struct at86rf2xx_chip_data at86rf231_data = {
.t_p_ack = 545,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
- .get_desense_steps = at86rf23x_get_desens_steps
+ .set_txpower = at86rf23x_set_txpower,
};
static struct at86rf2xx_chip_data at86rf212_data = {
@@ -1391,7 +1271,7 @@ static struct at86rf2xx_chip_data at86rf212_data = {
.t_p_ack = 545,
.rssi_base_val = -100,
.set_channel = at86rf212_set_channel,
- .get_desense_steps = at86rf212_get_desens_steps
+ .set_txpower = at86rf212_set_txpower,
};
static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
@@ -1564,8 +1444,21 @@ at86rf230_detect_device(struct at86rf230_local *lp)
}
lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
- IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
- IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
+ IEEE802154_HW_CSMA_PARAMS |
+ IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT |
+ IEEE802154_HW_PROMISCUOUS;
+
+ lp->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER |
+ WPAN_PHY_FLAG_CCA_ED_LEVEL |
+ WPAN_PHY_FLAG_CCA_MODE;
+
+ lp->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+ BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+ lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+ BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+ lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels);
lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
@@ -1573,36 +1466,49 @@ at86rf230_detect_device(struct at86rf230_local *lp)
case 2:
chip = "at86rf230";
rc = -ENOTSUPP;
- break;
+ goto not_supp;
case 3:
chip = "at86rf231";
lp->data = &at86rf231_data;
- lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 11;
lp->hw->phy->symbol_duration = 16;
+ lp->hw->phy->supported.tx_powers = at86rf231_powers;
+ lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
break;
case 7:
chip = "at86rf212";
lp->data = &at86rf212_data;
lp->hw->flags |= IEEE802154_HW_LBT;
- lp->hw->phy->channels_supported[0] = 0x00007FF;
- lp->hw->phy->channels_supported[2] = 0x00007FF;
+ lp->hw->phy->supported.channels[0] = 0x00007FF;
+ lp->hw->phy->supported.channels[2] = 0x00007FF;
lp->hw->phy->current_channel = 5;
lp->hw->phy->symbol_duration = 25;
+ lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
+ lp->hw->phy->supported.tx_powers = at86rf212_powers;
+ lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
break;
case 11:
chip = "at86rf233";
lp->data = &at86rf233_data;
- lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 13;
lp->hw->phy->symbol_duration = 16;
+ lp->hw->phy->supported.tx_powers = at86rf233_powers;
+ lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
break;
default:
chip = "unknown";
rc = -ENOTSUPP;
- break;
+ goto not_supp;
}
+ lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7];
+ lp->hw->phy->transmit_power = lp->hw->phy->supported.tx_powers[0];
+
+not_supp:
dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version);
return rc;
diff --git a/drivers/net/ieee802154/at86rf230.h b/drivers/net/ieee802154/at86rf230.h
new file mode 100644
index 000000000000..1e6d1cc677f6
--- /dev/null
+++ b/drivers/net/ieee802154/at86rf230.h
@@ -0,0 +1,220 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#ifndef _AT86RF230_H
+#define _AT86RF230_H
+
+#define RG_TRX_STATUS (0x01)
+#define SR_TRX_STATUS 0x01, 0x1f, 0
+#define SR_RESERVED_01_3 0x01, 0x20, 5
+#define SR_CCA_STATUS 0x01, 0x40, 6
+#define SR_CCA_DONE 0x01, 0x80, 7
+#define RG_TRX_STATE (0x02)
+#define SR_TRX_CMD 0x02, 0x1f, 0
+#define SR_TRAC_STATUS 0x02, 0xe0, 5
+#define RG_TRX_CTRL_0 (0x03)
+#define SR_CLKM_CTRL 0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
+#define SR_PAD_IO_CLKM 0x03, 0x30, 4
+#define SR_PAD_IO 0x03, 0xc0, 6
+#define RG_TRX_CTRL_1 (0x04)
+#define SR_IRQ_POLARITY 0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
+#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
+#define SR_RX_BL_CTRL 0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
+#define SR_PA_EXT_EN 0x04, 0x80, 7
+#define RG_PHY_TX_PWR (0x05)
+#define SR_TX_PWR_23X 0x05, 0x0f, 0
+#define SR_PA_LT_230 0x05, 0x30, 4
+#define SR_PA_BUF_LT_230 0x05, 0xc0, 6
+#define SR_TX_PWR_212 0x05, 0x1f, 0
+#define SR_GC_PA_212 0x05, 0x60, 5
+#define SR_PA_BOOST_LT_212 0x05, 0x80, 7
+#define RG_PHY_RSSI (0x06)
+#define SR_RSSI 0x06, 0x1f, 0
+#define SR_RND_VALUE 0x06, 0x60, 5
+#define SR_RX_CRC_VALID 0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL (0x07)
+#define SR_ED_LEVEL 0x07, 0xff, 0
+#define RG_PHY_CC_CCA (0x08)
+#define SR_CHANNEL 0x08, 0x1f, 0
+#define SR_CCA_MODE 0x08, 0x60, 5
+#define SR_CCA_REQUEST 0x08, 0x80, 7
+#define RG_CCA_THRES (0x09)
+#define SR_CCA_ED_THRES 0x09, 0x0f, 0
+#define SR_RESERVED_09_1 0x09, 0xf0, 4
+#define RG_RX_CTRL (0x0a)
+#define SR_PDT_THRES 0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
+#define RG_SFD_VALUE (0x0b)
+#define SR_SFD_VALUE 0x0b, 0xff, 0
+#define RG_TRX_CTRL_2 (0x0c)
+#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
+#define SR_SUB_MODE 0x0c, 0x04, 2
+#define SR_BPSK_QPSK 0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
+#define SR_RESERVED_0c_5 0x0c, 0x60, 5
+#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
+#define RG_ANT_DIV (0x0d)
+#define SR_ANT_CTRL 0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
+#define SR_ANT_DIV_EN 0x0d, 0x08, 3
+#define SR_RESERVED_0d_2 0x0d, 0x70, 4
+#define SR_ANT_SEL 0x0d, 0x80, 7
+#define RG_IRQ_MASK (0x0e)
+#define SR_IRQ_MASK 0x0e, 0xff, 0
+#define RG_IRQ_STATUS (0x0f)
+#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
+#define SR_IRQ_5_AMI 0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
+#define RG_VREG_CTRL (0x10)
+#define SR_RESERVED_10_6 0x10, 0x03, 0
+#define SR_DVDD_OK 0x10, 0x04, 2
+#define SR_DVREG_EXT 0x10, 0x08, 3
+#define SR_RESERVED_10_3 0x10, 0x30, 4
+#define SR_AVDD_OK 0x10, 0x40, 6
+#define SR_AVREG_EXT 0x10, 0x80, 7
+#define RG_BATMON (0x11)
+#define SR_BATMON_VTH 0x11, 0x0f, 0
+#define SR_BATMON_HR 0x11, 0x10, 4
+#define SR_BATMON_OK 0x11, 0x20, 5
+#define SR_RESERVED_11_1 0x11, 0xc0, 6
+#define RG_XOSC_CTRL (0x12)
+#define SR_XTAL_TRIM 0x12, 0x0f, 0
+#define SR_XTAL_MODE 0x12, 0xf0, 4
+#define RG_RX_SYN (0x15)
+#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
+#define SR_RESERVED_15_2 0x15, 0x70, 4
+#define SR_RX_PDT_DIS 0x15, 0x80, 7
+#define RG_XAH_CTRL_1 (0x17)
+#define SR_RESERVED_17_8 0x17, 0x01, 0
+#define SR_AACK_PROM_MODE 0x17, 0x02, 1
+#define SR_AACK_ACK_TIME 0x17, 0x04, 2
+#define SR_RESERVED_17_5 0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
+#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
+#define SR_RESERVED_17_1 0x17, 0x80, 7
+#define RG_FTN_CTRL (0x18)
+#define SR_RESERVED_18_2 0x18, 0x7f, 0
+#define SR_FTN_START 0x18, 0x80, 7
+#define RG_PLL_CF (0x1a)
+#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
+#define SR_PLL_CF_START 0x1a, 0x80, 7
+#define RG_PLL_DCU (0x1b)
+#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2 0x1b, 0x40, 6
+#define SR_PLL_DCU_START 0x1b, 0x80, 7
+#define RG_PART_NUM (0x1c)
+#define SR_PART_NUM 0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM 0x1d, 0xff, 0
+#define RG_MAN_ID_0 (0x1e)
+#define SR_MAN_ID_0 0x1e, 0xff, 0
+#define RG_MAN_ID_1 (0x1f)
+#define SR_MAN_ID_1 0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0 (0x20)
+#define SR_SHORT_ADDR_0 0x20, 0xff, 0
+#define RG_SHORT_ADDR_1 (0x21)
+#define SR_SHORT_ADDR_1 0x21, 0xff, 0
+#define RG_PAN_ID_0 (0x22)
+#define SR_PAN_ID_0 0x22, 0xff, 0
+#define RG_PAN_ID_1 (0x23)
+#define SR_PAN_ID_1 0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0 0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1 0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2 0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3 0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4 0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5 0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
+#define RG_XAH_CTRL_0 (0x2c)
+#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0 0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1 0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
+#define SR_AACK_SET_PD 0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
+#define RG_CSMA_BE (0x2f)
+#define SR_MIN_BE 0x2f, 0x0f, 0
+#define SR_MAX_BE 0x2f, 0xf0, 4
+
+#define CMD_REG 0x80
+#define CMD_REG_MASK 0x3f
+#define CMD_WRITE 0x40
+#define CMD_FB 0x20
+
+#define IRQ_BAT_LOW BIT(7)
+#define IRQ_TRX_UR BIT(6)
+#define IRQ_AMI BIT(5)
+#define IRQ_CCA_ED BIT(4)
+#define IRQ_TRX_END BIT(3)
+#define IRQ_RX_START BIT(2)
+#define IRQ_PLL_UNL BIT(1)
+#define IRQ_PLL_LOCK BIT(0)
+
+#define IRQ_ACTIVE_HIGH 0
+#define IRQ_ACTIVE_LOW 1
+
+#define STATE_P_ON 0x00 /* BUSY */
+#define STATE_BUSY_RX 0x01
+#define STATE_BUSY_TX 0x02
+#define STATE_FORCE_TRX_OFF 0x03
+#define STATE_FORCE_TX_ON 0x04 /* IDLE */
+/* 0x05 */ /* INVALID_PARAMETER */
+#define STATE_RX_ON 0x06
+/* 0x07 */ /* SUCCESS */
+#define STATE_TRX_OFF 0x08
+#define STATE_TX_ON 0x09
+/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP 0x0F
+#define STATE_PREP_DEEP_SLEEP 0x10
+#define STATE_BUSY_RX_AACK 0x11
+#define STATE_BUSY_TX_ARET 0x12
+#define STATE_RX_AACK_ON 0x16
+#define STATE_TX_ARET_ON 0x19
+#define STATE_RX_ON_NOCLK 0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+#define TRX_STATE_MASK (0x1F)
+
+#endif /* !_AT86RF230_H */
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
new file mode 100644
index 000000000000..5b6bb9adf9ae
--- /dev/null
+++ b/drivers/net/ieee802154/atusb.c
@@ -0,0 +1,699 @@
+/*
+ * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2
+ *
+ * Based on at86rf230.c and spi_atusb.c.
+ * at86rf230.c is
+ * Copyright (C) 2009 Siemens AG
+ * Written by: Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ *
+ * spi_atusb.c is
+ * Copyright (c) 2011 Richard Sharpe <realrichardsharpe@gmail.com>
+ * Copyright (c) 2011 Stefan Schmidt <stefan@datenfreihafen.org>
+ * Copyright (c) 2011 Werner Almesberger <werner@almesberger.net>
+ *
+ * USB initialization is
+ * Copyright (c) 2013 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include <net/cfg802154.h>
+#include <net/mac802154.h>
+
+#include "at86rf230.h"
+#include "atusb.h"
+
+#define ATUSB_JEDEC_ATMEL 0x1f /* JEDEC manufacturer ID */
+
+#define ATUSB_NUM_RX_URBS 4 /* allow for a bit of local latency */
+#define ATUSB_ALLOC_DELAY_MS 100 /* delay after failed allocation */
+#define ATUSB_TX_TIMEOUT_MS 200 /* on the air timeout */
+
+struct atusb {
+ struct ieee802154_hw *hw;
+ struct usb_device *usb_dev;
+ int shutdown; /* non-zero if shutting down */
+ int err; /* set by first error */
+
+ /* RX variables */
+ struct delayed_work work; /* memory allocations */
+ struct usb_anchor idle_urbs; /* URBs waiting to be submitted */
+ struct usb_anchor rx_urbs; /* URBs waiting for reception */
+
+ /* TX variables */
+ struct usb_ctrlrequest tx_dr;
+ struct urb *tx_urb;
+ struct sk_buff *tx_skb;
+ uint8_t tx_ack_seq; /* current TX ACK sequence number */
+};
+
+/* at86rf230.h defines values as <reg, mask, shift> tuples. We use the more
+ * traditional style of having registers and or-able values. SR_REG extracts
+ * the register number. SR_VALUE uses the shift to prepare a value accordingly.
+ */
+
+#define __SR_REG(reg, mask, shift) (reg)
+#define SR_REG(sr) __SR_REG(sr)
+
+#define __SR_VALUE(reg, mask, shift, val) ((val) << (shift))
+#define SR_VALUE(sr, val) __SR_VALUE(sr, (val))
+
+/* ----- USB commands without data ----------------------------------------- */
+
+/* To reduce the number of error checks in the code, we record the first error
+ * in atusb->err and reject all subsequent requests until the error is cleared.
+ */
+
+static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
+ __u8 request, __u8 requesttype,
+ __u16 value, __u16 index,
+ void *data, __u16 size, int timeout)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+
+ if (atusb->err)
+ return atusb->err;
+
+ ret = usb_control_msg(usb_dev, pipe, request, requesttype,
+ value, index, data, size, timeout);
+ if (ret < 0) {
+ atusb->err = ret;
+ dev_err(&usb_dev->dev,
+ "atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n",
+ request, value, index, ret);
+ }
+ return ret;
+}
+
+static int atusb_command(struct atusb *atusb, uint8_t cmd, uint8_t arg)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+
+ dev_dbg(&usb_dev->dev, "atusb_command: cmd = 0x%x\n", cmd);
+ return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+ cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
+}
+
+static int atusb_write_reg(struct atusb *atusb, uint8_t reg, uint8_t value)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+
+ dev_dbg(&usb_dev->dev, "atusb_write_reg: 0x%02x <- 0x%02x\n",
+ reg, value);
+ return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+ ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ value, reg, NULL, 0, 1000);
+}
+
+static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+ uint8_t value;
+
+ dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, reg, &value, 1, 1000);
+ return ret >= 0 ? value : ret;
+}
+
+static int atusb_get_and_clear_error(struct atusb *atusb)
+{
+ int err = atusb->err;
+
+ atusb->err = 0;
+ return err;
+}
+
+/* ----- skb allocation ---------------------------------------------------- */
+
+#define MAX_PSDU 127
+#define MAX_RX_XFER (1 + MAX_PSDU + 2 + 1) /* PHR+PSDU+CRC+LQI */
+
+#define SKB_ATUSB(skb) (*(struct atusb **)(skb)->cb)
+
+static void atusb_in(struct urb *urb);
+
+static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ struct sk_buff *skb = urb->context;
+ int ret;
+
+ if (!skb) {
+ skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL);
+ if (!skb) {
+ dev_warn_ratelimited(&usb_dev->dev,
+ "atusb_in: can't allocate skb\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, MAX_RX_XFER);
+ SKB_ATUSB(skb) = atusb;
+ }
+
+ usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1),
+ skb->data, MAX_RX_XFER, atusb_in, skb);
+ usb_anchor_urb(urb, &atusb->rx_urbs);
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ kfree_skb(skb);
+ urb->context = NULL;
+ }
+ return ret;
+}
+
+static void atusb_work_urbs(struct work_struct *work)
+{
+ struct atusb *atusb =
+ container_of(to_delayed_work(work), struct atusb, work);
+ struct usb_device *usb_dev = atusb->usb_dev;
+ struct urb *urb;
+ int ret;
+
+ if (atusb->shutdown)
+ return;
+
+ do {
+ urb = usb_get_from_anchor(&atusb->idle_urbs);
+ if (!urb)
+ return;
+ ret = atusb_submit_rx_urb(atusb, urb);
+ } while (!ret);
+
+ usb_anchor_urb(urb, &atusb->idle_urbs);
+ dev_warn_ratelimited(&usb_dev->dev,
+ "atusb_in: can't allocate/submit URB (%d)\n", ret);
+ schedule_delayed_work(&atusb->work,
+ msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1);
+}
+
+/* ----- Asynchronous USB -------------------------------------------------- */
+
+static void atusb_tx_done(struct atusb *atusb, uint8_t seq)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ uint8_t expect = atusb->tx_ack_seq;
+
+ dev_dbg(&usb_dev->dev, "atusb_tx_done (0x%02x/0x%02x)\n", seq, expect);
+ if (seq == expect) {
+ /* TODO check for ifs handling in firmware */
+ ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
+ } else {
+ /* TODO This case occurs when atusb raises a tx complete
+ * irq before probing finishes; we should fix the firmware. It is
+ * unlikely that seq == expect is true then, but it can
+ * happen and would fail with a tx_skb = NULL;
+ */
+ ieee802154_wake_queue(atusb->hw);
+ if (atusb->tx_skb)
+ dev_kfree_skb_irq(atusb->tx_skb);
+ }
+}
+
+static void atusb_in_good(struct urb *urb)
+{
+ struct usb_device *usb_dev = urb->dev;
+ struct sk_buff *skb = urb->context;
+ struct atusb *atusb = SKB_ATUSB(skb);
+ uint8_t len, lqi;
+
+ if (!urb->actual_length) {
+ dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n");
+ return;
+ }
+
+ len = *skb->data;
+
+ if (urb->actual_length == 1) {
+ atusb_tx_done(atusb, len);
+ return;
+ }
+
+ if (len + 1 > urb->actual_length - 1) {
+ dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n",
+ len, urb->actual_length);
+ return;
+ }
+
+ if (!ieee802154_is_valid_psdu_len(len)) {
+ dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n");
+ return;
+ }
+
+ lqi = skb->data[len + 1];
+ dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi);
+ skb_pull(skb, 1); /* remove PHR */
+ skb_trim(skb, len); /* get payload only */
+ ieee802154_rx_irqsafe(atusb->hw, skb, lqi);
+ urb->context = NULL; /* skb is gone */
+}
+
+static void atusb_in(struct urb *urb)
+{
+ struct usb_device *usb_dev = urb->dev;
+ struct sk_buff *skb = urb->context;
+ struct atusb *atusb = SKB_ATUSB(skb);
+
+ dev_dbg(&usb_dev->dev, "atusb_in: status %d len %d\n",
+ urb->status, urb->actual_length);
+ if (urb->status) {
+ if (urb->status == -ENOENT) { /* being killed */
+ kfree_skb(skb);
+ urb->context = NULL;
+ return;
+ }
+ dev_dbg(&usb_dev->dev, "atusb_in: URB error %d\n", urb->status);
+ } else {
+ atusb_in_good(urb);
+ }
+
+ usb_anchor_urb(urb, &atusb->idle_urbs);
+ if (!atusb->shutdown)
+ schedule_delayed_work(&atusb->work, 0);
+}
+
+/* ----- URB allocation/deallocation --------------------------------------- */
+
+static void atusb_free_urbs(struct atusb *atusb)
+{
+ struct urb *urb;
+
+ while (1) {
+ urb = usb_get_from_anchor(&atusb->idle_urbs);
+ if (!urb)
+ break;
+ if (urb->context)
+ kfree_skb(urb->context);
+ usb_free_urb(urb);
+ }
+}
+
+static int atusb_alloc_urbs(struct atusb *atusb, int n)
+{
+ struct urb *urb;
+
+ while (n) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ atusb_free_urbs(atusb);
+ return -ENOMEM;
+ }
+ usb_anchor_urb(urb, &atusb->idle_urbs);
+ n--;
+ }
+ return 0;
+}
+
+/* ----- IEEE 802.15.4 interface operations -------------------------------- */
+
+static void atusb_xmit_complete(struct urb *urb)
+{
+ dev_dbg(&urb->dev->dev, "atusb_xmit urb completed");
+}
+
+static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+ struct atusb *atusb = hw->priv;
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+
+ dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len);
+ atusb->tx_skb = skb;
+ atusb->tx_ack_seq++;
+ atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq);
+ atusb->tx_dr.wLength = cpu_to_le16(skb->len);
+
+ usb_fill_control_urb(atusb->tx_urb, usb_dev,
+ usb_sndctrlpipe(usb_dev, 0),
+ (unsigned char *)&atusb->tx_dr, skb->data,
+ skb->len, atusb_xmit_complete, NULL);
+ ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC);
+ dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret);
+ return ret;
+}
+
+static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ struct atusb *atusb = hw->priv;
+ int ret;
+
+ /* This implicitly sets the CCA (Clear Channel Assessment) mode to 0,
+ * "Mode 3a, Carrier sense OR energy above threshold".
+ * We should probably make this configurable. @@@
+ */
+ ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel);
+ if (ret < 0)
+ return ret;
+ msleep(1); /* @@@ ugly synchronization */
+ return 0;
+}
+
+static int atusb_ed(struct ieee802154_hw *hw, u8 *level)
+{
+ BUG_ON(!level);
+ *level = 0xbe;
+ return 0;
+}
+
+static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed)
+{
+ struct atusb *atusb = hw->priv;
+ struct device *dev = &atusb->usb_dev->dev;
+ uint8_t reg;
+
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
+ u16 addr = le16_to_cpu(filt->short_addr);
+
+ dev_vdbg(dev, "atusb_set_hw_addr_filt called for saddr\n");
+ atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
+ atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
+ }
+
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
+ u16 pan = le16_to_cpu(filt->pan_id);
+
+ dev_vdbg(dev, "atusb_set_hw_addr_filt called for pan id\n");
+ atusb_write_reg(atusb, RG_PAN_ID_0, pan);
+ atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
+ }
+
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
+ u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN];
+
+ memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
+ dev_vdbg(dev, "atusb_set_hw_addr_filt called for IEEE addr\n");
+ for (i = 0; i < 8; i++)
+ atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
+ }
+
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
+ dev_vdbg(dev,
+ "atusb_set_hw_addr_filt called for panc change\n");
+ reg = atusb_read_reg(atusb, SR_REG(SR_AACK_I_AM_COORD));
+ if (filt->pan_coord)
+ reg |= SR_VALUE(SR_AACK_I_AM_COORD, 1);
+ else
+ reg &= ~SR_VALUE(SR_AACK_I_AM_COORD, 1);
+ atusb_write_reg(atusb, SR_REG(SR_AACK_I_AM_COORD), reg);
+ }
+
+ return atusb_get_and_clear_error(atusb);
+}
+
+static int atusb_start(struct ieee802154_hw *hw)
+{
+ struct atusb *atusb = hw->priv;
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+
+ dev_dbg(&usb_dev->dev, "atusb_start\n");
+ schedule_delayed_work(&atusb->work, 0);
+ atusb_command(atusb, ATUSB_RX_MODE, 1);
+ ret = atusb_get_and_clear_error(atusb);
+ if (ret < 0)
+ usb_kill_anchored_urbs(&atusb->idle_urbs);
+ return ret;
+}
+
+static void atusb_stop(struct ieee802154_hw *hw)
+{
+ struct atusb *atusb = hw->priv;
+ struct usb_device *usb_dev = atusb->usb_dev;
+
+ dev_dbg(&usb_dev->dev, "atusb_stop\n");
+ usb_kill_anchored_urbs(&atusb->idle_urbs);
+ atusb_command(atusb, ATUSB_RX_MODE, 0);
+ atusb_get_and_clear_error(atusb);
+}
+
+static struct ieee802154_ops atusb_ops = {
+ .owner = THIS_MODULE,
+ .xmit_async = atusb_xmit,
+ .ed = atusb_ed,
+ .set_channel = atusb_channel,
+ .start = atusb_start,
+ .stop = atusb_stop,
+ .set_hw_addr_filt = atusb_set_hw_addr_filt,
+};
+
+/* ----- Firmware and chip version information ----------------------------- */
+
+static int atusb_get_and_show_revision(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ unsigned char buffer[3];
+ int ret;
+
+ /* Get a couple of the ATMega Firmware values */
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, 3, 1000);
+ if (ret >= 0)
+ dev_info(&usb_dev->dev,
+ "Firmware: major: %u, minor: %u, hardware type: %u\n",
+ buffer[0], buffer[1], buffer[2]);
+ if (ret >= 0 && buffer[0] == 0 && buffer[1] < 2) {
+ dev_info(&usb_dev->dev,
+ "Firmware version (%u.%u) is predates our first public release.",
+ buffer[0], buffer[1]);
+ dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
+ }
+
+ return ret;
+}
+
+static int atusb_get_and_show_build(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ char build[ATUSB_BUILD_SIZE + 1];
+ int ret;
+
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
+ build, ATUSB_BUILD_SIZE, 1000);
+ if (ret >= 0) {
+ build[ret] = 0;
+ dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
+ }
+
+ return ret;
+}
+
+static int atusb_get_and_show_chip(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ uint8_t man_id_0, man_id_1, part_num, version_num;
+
+ man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
+ man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
+ part_num = atusb_read_reg(atusb, RG_PART_NUM);
+ version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
+
+ if (atusb->err)
+ return atusb->err;
+
+ if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) {
+ dev_err(&usb_dev->dev,
+ "non-Atmel transceiver xxxx%02x%02x\n",
+ man_id_1, man_id_0);
+ goto fail;
+ }
+ if (part_num != 3 && part_num != 2) {
+ dev_err(&usb_dev->dev,
+ "unexpected transceiver, part 0x%02x version 0x%02x\n",
+ part_num, version_num);
+ goto fail;
+ }
+
+ dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num);
+
+ return 0;
+
+fail:
+ atusb->err = -ENODEV;
+ return -ENODEV;
+}
+
+/* ----- Setup ------------------------------------------------------------- */
+
+static int atusb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(interface);
+ struct ieee802154_hw *hw;
+ struct atusb *atusb = NULL;
+ int ret = -ENOMEM;
+
+ hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ atusb = hw->priv;
+ atusb->hw = hw;
+ atusb->usb_dev = usb_get_dev(usb_dev);
+ usb_set_intfdata(interface, atusb);
+
+ atusb->shutdown = 0;
+ atusb->err = 0;
+ INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs);
+ init_usb_anchor(&atusb->idle_urbs);
+ init_usb_anchor(&atusb->rx_urbs);
+
+ if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS))
+ goto fail;
+
+ atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV;
+ atusb->tx_dr.bRequest = ATUSB_TX;
+ atusb->tx_dr.wValue = cpu_to_le16(0);
+
+ atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!atusb->tx_urb)
+ goto fail;
+
+ hw->parent = &usb_dev->dev;
+ hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
+ IEEE802154_HW_AACK;
+
+ hw->phy->current_page = 0;
+ hw->phy->current_channel = 11; /* reset default */
+ hw->phy->supported.channels[0] = 0x7FFF800;
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+ atusb_command(atusb, ATUSB_RF_RESET, 0);
+ atusb_get_and_show_chip(atusb);
+ atusb_get_and_show_revision(atusb);
+ atusb_get_and_show_build(atusb);
+ ret = atusb_get_and_clear_error(atusb);
+ if (ret) {
+ dev_err(&atusb->usb_dev->dev,
+ "%s: initialization failed, error = %d\n",
+ __func__, ret);
+ goto fail;
+ }
+
+ ret = ieee802154_register_hw(hw);
+ if (ret)
+ goto fail;
+
+ /* If we just powered on, we're now in P_ON and need to enter TRX_OFF
+ * explicitly. Any resets after that will send us straight to TRX_OFF,
+ * making the command below redundant.
+ */
+ atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF);
+ msleep(1); /* reset => TRX_OFF, tTR13 = 37 us */
+
+#if 0
+ /* Calculating the maximum time available to empty the frame buffer
+ * on reception:
+ *
+ * According to [1], the inter-frame gap is
+ * R * 20 * 16 us + 128 us
+ * where R is a random number from 0 to 7. Furthermore, we have 20 bit
+ * times (80 us at 250 kbps) of SHR of the next frame before the
+ * transceiver begins storing data in the frame buffer.
+ *
+ * This yields a minimum time of 208 us between the last data of a
+ * frame and the first data of the next frame. This time is further
+ * reduced by interrupt latency in the atusb firmware.
+ *
+ * atusb currently needs about 500 us to retrieve a maximum-sized
+ * frame. We therefore have to allow reception of a new frame to begin
+ * while we retrieve the previous frame.
+ *
+ * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based
+ * network", Jennic 2006.
+ * http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf
+ */
+
+ atusb_write_reg(atusb,
+ SR_REG(SR_RX_SAFE_MODE), SR_VALUE(SR_RX_SAFE_MODE, 1));
+#endif
+ atusb_write_reg(atusb, RG_IRQ_MASK, 0xff);
+
+ ret = atusb_get_and_clear_error(atusb);
+ if (!ret)
+ return 0;
+
+ dev_err(&atusb->usb_dev->dev,
+ "%s: setup failed, error = %d\n",
+ __func__, ret);
+
+ ieee802154_unregister_hw(hw);
+fail:
+ atusb_free_urbs(atusb);
+ usb_kill_urb(atusb->tx_urb);
+ usb_free_urb(atusb->tx_urb);
+ usb_put_dev(usb_dev);
+ ieee802154_free_hw(hw);
+ return ret;
+}
+
+static void atusb_disconnect(struct usb_interface *interface)
+{
+ struct atusb *atusb = usb_get_intfdata(interface);
+
+ dev_dbg(&atusb->usb_dev->dev, "atusb_disconnect\n");
+
+ atusb->shutdown = 1;
+ cancel_delayed_work_sync(&atusb->work);
+
+ usb_kill_anchored_urbs(&atusb->rx_urbs);
+ atusb_free_urbs(atusb);
+ usb_kill_urb(atusb->tx_urb);
+ usb_free_urb(atusb->tx_urb);
+
+ ieee802154_unregister_hw(atusb->hw);
+
+ ieee802154_free_hw(atusb->hw);
+
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(atusb->usb_dev);
+
+ pr_debug("atusb_disconnect done\n");
+}
+
+/* The devices we work with */
+static const struct usb_device_id atusb_device_table[] = {
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = ATUSB_VENDOR_ID,
+ .idProduct = ATUSB_PRODUCT_ID,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC
+ },
+ /* end with null element */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, atusb_device_table);
+
+static struct usb_driver atusb_driver = {
+ .name = "atusb",
+ .probe = atusb_probe,
+ .disconnect = atusb_disconnect,
+ .id_table = atusb_device_table,
+};
+module_usb_driver(atusb_driver);
+
+MODULE_AUTHOR("Alexander Aring <alex.aring@gmail.com>");
+MODULE_AUTHOR("Richard Sharpe <realrichardsharpe@gmail.com>");
+MODULE_AUTHOR("Stefan Schmidt <stefan@datenfreihafen.org>");
+MODULE_AUTHOR("Werner Almesberger <werner@almesberger.net>");
+MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver");
+MODULE_LICENSE("GPL");
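
A pattern worth noting in atusb.c: the first USB error is latched in atusb->err, every later request fails fast, and atusb_get_and_clear_error() reads the verdict once at the end of a sequence. That keeps multi-step paths such as atusb_set_hw_addr_filt() free of per-call error handling. The idiom in isolation, as a minimal sketch with hypothetical names:

struct latched_err {
	int err;	/* 0, or the first failure code */
};

static int latched_call(struct latched_err *le, int (*op)(void *), void *arg)
{
	int ret;

	if (le->err)		/* an earlier step failed: fail fast */
		return le->err;

	ret = op(arg);
	if (ret < 0)
		le->err = ret;	/* remember only the first error */
	return ret;
}

static int latched_get_and_clear(struct latched_err *le)
{
	int err = le->err;

	le->err = 0;
	return err;
}
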
diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h
new file mode 100644
index 000000000000..0690edcad57b
--- /dev/null
+++ b/drivers/net/ieee802154/atusb.h
@@ -0,0 +1,84 @@
+/*
+ * atusb.h - Definitions shared between kernel and ATUSB firmware
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2, or
+ * (at your option) any later version.
+ *
+ * This file should be identical for kernel and firmware.
+ * Kernel: drivers/net/ieee802154/atusb.h
+ * Firmware: ben-wpan/atusb/fw/include/atusb/atusb.h
+ */
+
+#ifndef _ATUSB_H
+#define _ATUSB_H
+
+#define ATUSB_VENDOR_ID 0x20b7 /* Qi Hardware */
+#define ATUSB_PRODUCT_ID 0x1540 /* 802.15.4, device 0 */
+ /* -- - - */
+
+#define ATUSB_BUILD_SIZE 256 /* maximum build version/date message length */
+
+/* Commands to our device. Make sure this is synced with the firmware */
+enum atusb_requests {
+ ATUSB_ID = 0x00, /* system status/control grp */
+ ATUSB_BUILD,
+ ATUSB_RESET,
+ ATUSB_RF_RESET = 0x10, /* debug/test group */
+ ATUSB_POLL_INT,
+ ATUSB_TEST, /* atusb-sil only */
+ ATUSB_TIMER,
+ ATUSB_GPIO,
+ ATUSB_SLP_TR,
+ ATUSB_GPIO_CLEANUP,
+ ATUSB_REG_WRITE = 0x20, /* transceiver group */
+ ATUSB_REG_READ,
+ ATUSB_BUF_WRITE,
+ ATUSB_BUF_READ,
+ ATUSB_SRAM_WRITE,
+ ATUSB_SRAM_READ,
+ ATUSB_SPI_WRITE = 0x30, /* SPI group */
+ ATUSB_SPI_READ1,
+ ATUSB_SPI_READ2,
+ ATUSB_SPI_WRITE2_SYNC,
+ ATUSB_RX_MODE = 0x40, /* HardMAC group */
+ ATUSB_TX,
+};
+
+/* Direction bRequest wValue wIndex wLength
+ *
+ * ->host ATUSB_ID - - 3
+ * ->host ATUSB_BUILD - - #bytes
+ * host-> ATUSB_RESET - - 0
+ *
+ * host-> ATUSB_RF_RESET - - 0
+ * ->host ATUSB_POLL_INT - - 1
+ * host-> ATUSB_TEST - - 0
+ * ->host ATUSB_TIMER - - #bytes (6)
+ * ->host ATUSB_GPIO dir+data mask+p# 3
+ * host-> ATUSB_SLP_TR - - 0
+ * host-> ATUSB_GPIO_CLEANUP - - 0
+ *
+ * host-> ATUSB_REG_WRITE value addr 0
+ * ->host ATUSB_REG_READ - addr 1
+ * host-> ATUSB_BUF_WRITE - - #bytes
+ * ->host ATUSB_BUF_READ - - #bytes
+ * host-> ATUSB_SRAM_WRITE - addr #bytes
+ * ->host ATUSB_SRAM_READ - addr #bytes
+ *
+ * host-> ATUSB_SPI_WRITE byte0 byte1 #bytes
+ * ->host ATUSB_SPI_READ1 byte0 - #bytes
+ * ->host ATUSB_SPI_READ2 byte0 byte1 #bytes
+ * ->host ATUSB_SPI_WRITE2_SYNC byte0 byte1 0/1
+ *
+ * host-> ATUSB_RX_MODE on - 0
+ * host-> ATUSB_TX flags ack_seq #bytes
+ */
+
+#define ATUSB_REQ_FROM_DEV (USB_TYPE_VENDOR | USB_DIR_IN)
+#define ATUSB_REQ_TO_DEV (USB_TYPE_VENDOR | USB_DIR_OUT)
+
+#endif /* !_ATUSB_H */
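
The table above is effectively the whole wire protocol: vendor control transfers with the register address in wIndex and the value in wValue. As a usage sketch, here is a one-register read per the ->host ATUSB_REG_READ row (an illustrative helper, not part of the patch; note the heap buffer, since USB transfer buffers must be DMA-able):

#include <linux/slab.h>
#include <linux/usb.h>

static int atusb_example_reg_read(struct usb_device *udev, uint8_t reg)
{
	uint8_t *val;
	int ret;

	val = kmalloc(1, GFP_KERNEL);	/* heap, not stack: DMA-able */
	if (!val)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
			      0, reg, val, 1, 1000);	/* wIndex = reg addr */
	if (ret >= 0)
		ret = *val;
	kfree(val);
	return ret;
}
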
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index f833b8bb6663..84b28a05c5a1 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -653,7 +653,7 @@ static int cc2520_register(struct cc2520_private *priv)
ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
/* We do support only 2.4 Ghz */
- priv->hw->phy->channels_supported[0] = 0x7FFF800;
+ priv->hw->phy->supported.channels[0] = 0x7FFF800;
priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
IEEE802154_HW_AFILT;
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index dc2bfb600b4b..9d0da4ec3e8c 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -27,25 +27,25 @@
#include <net/mac802154.h>
#include <net/cfg802154.h>
-static int numlbs = 1;
+static int numlbs = 2;
-struct fakelb_dev_priv {
- struct ieee802154_hw *hw;
+static LIST_HEAD(fakelb_phys);
+static DEFINE_SPINLOCK(fakelb_phys_lock);
- struct list_head list;
- struct fakelb_priv *fake;
+static LIST_HEAD(fakelb_ifup_phys);
+static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
- spinlock_t lock;
- bool working;
-};
+struct fakelb_phy {
+ struct ieee802154_hw *hw;
+
+ u8 page;
+ u8 channel;
-struct fakelb_priv {
struct list_head list;
- rwlock_t lock;
+ struct list_head list_ifup;
};
-static int
-fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
+static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
{
BUG_ON(!level);
*level = 0xbe;
@@ -53,78 +53,63 @@ fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
return 0;
}
-static int
-fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
- pr_debug("set channel to %d\n", channel);
+ struct fakelb_phy *phy = hw->priv;
+ write_lock_bh(&fakelb_ifup_phys_lock);
+ phy->page = page;
+ phy->channel = channel;
+ write_unlock_bh(&fakelb_ifup_phys_lock);
return 0;
}
-static void
-fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
+static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct sk_buff *newskb;
+ struct fakelb_phy *current_phy = hw->priv, *phy;
- spin_lock(&priv->lock);
- if (priv->working) {
- newskb = pskb_copy(skb, GFP_ATOMIC);
- ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc);
- }
- spin_unlock(&priv->lock);
-}
+ read_lock_bh(&fakelb_ifup_phys_lock);
+ list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) {
+ if (current_phy == phy)
+ continue;
-static int
-fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
-{
- struct fakelb_dev_priv *priv = hw->priv;
- struct fakelb_priv *fake = priv->fake;
-
- read_lock_bh(&fake->lock);
- if (priv->list.next == priv->list.prev) {
- /* we are the only one device */
- fakelb_hw_deliver(priv, skb);
- } else {
- struct fakelb_dev_priv *dp;
- list_for_each_entry(dp, &priv->fake->list, list) {
- if (dp != priv &&
- (dp->hw->phy->current_channel ==
- priv->hw->phy->current_channel))
- fakelb_hw_deliver(dp, skb);
+ if (current_phy->page == phy->page &&
+ current_phy->channel == phy->channel) {
+ struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
+
+ if (newskb)
+ ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc);
}
}
- read_unlock_bh(&fake->lock);
+ read_unlock_bh(&fakelb_ifup_phys_lock);
+ ieee802154_xmit_complete(hw, skb, false);
return 0;
}
-static int
-fakelb_hw_start(struct ieee802154_hw *hw) {
- struct fakelb_dev_priv *priv = hw->priv;
- int ret = 0;
+static int fakelb_hw_start(struct ieee802154_hw *hw)
+{
+ struct fakelb_phy *phy = hw->priv;
- spin_lock(&priv->lock);
- if (priv->working)
- ret = -EBUSY;
- else
- priv->working = 1;
- spin_unlock(&priv->lock);
+ write_lock_bh(&fakelb_ifup_phys_lock);
+ list_add(&phy->list_ifup, &fakelb_ifup_phys);
+ write_unlock_bh(&fakelb_ifup_phys_lock);
- return ret;
+ return 0;
}
-static void
-fakelb_hw_stop(struct ieee802154_hw *hw) {
- struct fakelb_dev_priv *priv = hw->priv;
+static void fakelb_hw_stop(struct ieee802154_hw *hw)
+{
+ struct fakelb_phy *phy = hw->priv;
- spin_lock(&priv->lock);
- priv->working = 0;
- spin_unlock(&priv->lock);
+ write_lock_bh(&fakelb_ifup_phys_lock);
+ list_del(&phy->list_ifup);
+ write_unlock_bh(&fakelb_ifup_phys_lock);
}
static const struct ieee802154_ops fakelb_ops = {
.owner = THIS_MODULE,
- .xmit_sync = fakelb_hw_xmit,
+ .xmit_async = fakelb_hw_xmit,
.ed = fakelb_hw_ed,
.set_channel = fakelb_hw_channel,
.start = fakelb_hw_start,
@@ -135,54 +120,54 @@ static const struct ieee802154_ops fakelb_ops = {
module_param(numlbs, int, 0);
MODULE_PARM_DESC(numlbs, " number of pseudo devices");
-static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
+static int fakelb_add_one(struct device *dev)
{
- struct fakelb_dev_priv *priv;
- int err;
struct ieee802154_hw *hw;
+ struct fakelb_phy *phy;
+ int err;
- hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops);
+ hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops);
if (!hw)
return -ENOMEM;
- priv = hw->priv;
- priv->hw = hw;
+ phy = hw->priv;
+ phy->hw = hw;
/* 868 MHz BPSK 802.15.4-2003 */
- hw->phy->channels_supported[0] |= 1;
+ hw->phy->supported.channels[0] |= 1;
/* 915 MHz BPSK 802.15.4-2003 */
- hw->phy->channels_supported[0] |= 0x7fe;
+ hw->phy->supported.channels[0] |= 0x7fe;
/* 2.4 GHz O-QPSK 802.15.4-2003 */
- hw->phy->channels_supported[0] |= 0x7FFF800;
+ hw->phy->supported.channels[0] |= 0x7FFF800;
/* 868 MHz ASK 802.15.4-2006 */
- hw->phy->channels_supported[1] |= 1;
+ hw->phy->supported.channels[1] |= 1;
/* 915 MHz ASK 802.15.4-2006 */
- hw->phy->channels_supported[1] |= 0x7fe;
+ hw->phy->supported.channels[1] |= 0x7fe;
/* 868 MHz O-QPSK 802.15.4-2006 */
- hw->phy->channels_supported[2] |= 1;
+ hw->phy->supported.channels[2] |= 1;
/* 915 MHz O-QPSK 802.15.4-2006 */
- hw->phy->channels_supported[2] |= 0x7fe;
+ hw->phy->supported.channels[2] |= 0x7fe;
/* 2.4 GHz CSS 802.15.4a-2007 */
- hw->phy->channels_supported[3] |= 0x3fff;
+ hw->phy->supported.channels[3] |= 0x3fff;
/* UWB Sub-gigahertz 802.15.4a-2007 */
- hw->phy->channels_supported[4] |= 1;
+ hw->phy->supported.channels[4] |= 1;
/* UWB Low band 802.15.4a-2007 */
- hw->phy->channels_supported[4] |= 0x1e;
+ hw->phy->supported.channels[4] |= 0x1e;
/* UWB High band 802.15.4a-2007 */
- hw->phy->channels_supported[4] |= 0xffe0;
+ hw->phy->supported.channels[4] |= 0xffe0;
/* 750 MHz O-QPSK 802.15.4c-2009 */
- hw->phy->channels_supported[5] |= 0xf;
+ hw->phy->supported.channels[5] |= 0xf;
/* 750 MHz MPSK 802.15.4c-2009 */
- hw->phy->channels_supported[5] |= 0xf0;
+ hw->phy->supported.channels[5] |= 0xf0;
/* 950 MHz BPSK 802.15.4d-2009 */
- hw->phy->channels_supported[6] |= 0x3ff;
+ hw->phy->supported.channels[6] |= 0x3ff;
/* 950 MHz GFSK 802.15.4d-2009 */
- hw->phy->channels_supported[6] |= 0x3ffc00;
+ hw->phy->supported.channels[6] |= 0x3ffc00;
- INIT_LIST_HEAD(&priv->list);
- priv->fake = fake;
-
- spin_lock_init(&priv->lock);
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+ /* default the fake phy to channel 13 */
+ hw->phy->current_channel = 13;
+ phy->channel = hw->phy->current_channel;
hw->parent = dev;
@@ -190,67 +175,55 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
if (err)
goto err_reg;
- write_lock_bh(&fake->lock);
- list_add_tail(&priv->list, &fake->list);
- write_unlock_bh(&fake->lock);
+ spin_lock(&fakelb_phys_lock);
+ list_add_tail(&phy->list, &fakelb_phys);
+ spin_unlock(&fakelb_phys_lock);
return 0;
err_reg:
- ieee802154_free_hw(priv->hw);
+ ieee802154_free_hw(phy->hw);
return err;
}
-static void fakelb_del(struct fakelb_dev_priv *priv)
+static void fakelb_del(struct fakelb_phy *phy)
{
- write_lock_bh(&priv->fake->lock);
- list_del(&priv->list);
- write_unlock_bh(&priv->fake->lock);
+ list_del(&phy->list);
- ieee802154_unregister_hw(priv->hw);
- ieee802154_free_hw(priv->hw);
+ ieee802154_unregister_hw(phy->hw);
+ ieee802154_free_hw(phy->hw);
}
static int fakelb_probe(struct platform_device *pdev)
{
- struct fakelb_priv *priv;
- struct fakelb_dev_priv *dp;
- int err = -ENOMEM;
- int i;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
- GFP_KERNEL);
- if (!priv)
- goto err_alloc;
-
- INIT_LIST_HEAD(&priv->list);
- rwlock_init(&priv->lock);
+ struct fakelb_phy *phy, *tmp;
+ int err, i;
for (i = 0; i < numlbs; i++) {
- err = fakelb_add_one(&pdev->dev, priv);
+ err = fakelb_add_one(&pdev->dev);
if (err < 0)
goto err_slave;
}
- platform_set_drvdata(pdev, priv);
dev_info(&pdev->dev, "added ieee802154 hardware\n");
return 0;
err_slave:
- list_for_each_entry(dp, &priv->list, list)
- fakelb_del(dp);
-err_alloc:
+ spin_lock(&fakelb_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+ fakelb_del(phy);
+ spin_unlock(&fakelb_phys_lock);
return err;
}
static int fakelb_remove(struct platform_device *pdev)
{
- struct fakelb_priv *priv = platform_get_drvdata(pdev);
- struct fakelb_dev_priv *dp, *temp;
-
- list_for_each_entry_safe(dp, temp, &priv->list, list)
- fakelb_del(dp);
+ struct fakelb_phy *phy, *tmp;
+ spin_lock(&fakelb_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+ fakelb_del(phy);
+ spin_unlock(&fakelb_phys_lock);
return 0;
}
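With the switch from .xmit_sync to .xmit_async above, the driver owns the skb until it reports completion back to mac802154. The contract, reduced to a skeleton (a sketch, not the driver itself — delivery is assumed to have happened already):

static int demo_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	/* ... hand the frame to hardware, or loop it back as above ... */

	/* Tell mac802154 the frame has left the driver; 'false' means no
	 * inter-frame-spacing handling is required from the stack.
	 */
	ieee802154_xmit_complete(hw, skb, false);
	return 0;
}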
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index fba2dfd910f7..f2a1bd122a74 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -750,7 +750,7 @@ static int mrf24j40_probe(struct spi_device *spi)
devrec->hw->priv = devrec;
devrec->hw->parent = &devrec->spi->dev;
- devrec->hw->phy->channels_supported[0] = CHANNEL_MASK;
+ devrec->hw->phy->supported.channels[0] = CHANNEL_MASK;
devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
IEEE802154_HW_AFILT;
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 54549a6223dd..953a97492fab 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -39,6 +39,8 @@
#define IPVLAN_MAC_FILTER_SIZE (1 << IPVLAN_MAC_FILTER_BITS)
#define IPVLAN_MAC_FILTER_MASK (IPVLAN_MAC_FILTER_SIZE - 1)
+#define IPVLAN_QBACKLOG_LIMIT 1000
+
typedef enum {
IPVL_IPV6 = 0,
IPVL_ICMPV6,
@@ -93,6 +95,8 @@ struct ipvl_port {
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
struct list_head ipvlans;
struct rcu_head rcu;
+ struct work_struct wq;
+ struct sk_buff_head backlog;
int count;
u16 mode;
};
@@ -112,6 +116,7 @@ void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval);
void ipvlan_init_secret(void);
unsigned int ipvlan_mac_hash(const unsigned char *addr);
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
+void ipvlan_process_multicast(struct work_struct *work);
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index c30b5c300c05..8afbedad620d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -189,62 +189,69 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr)
return hash & IPVLAN_MAC_FILTER_MASK;
}
-static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
- const struct ipvl_dev *in_dev, bool local)
+void ipvlan_process_multicast(struct work_struct *work)
{
- struct ethhdr *eth = eth_hdr(skb);
+ struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
+ struct ethhdr *ethh;
struct ipvl_dev *ipvlan;
- struct sk_buff *nskb;
+ struct sk_buff *skb, *nskb;
+ struct sk_buff_head list;
unsigned int len;
unsigned int mac_hash;
int ret;
+ u8 pkt_type;
+ bool hlocal, dlocal;
- if (skb->protocol == htons(ETH_P_PAUSE))
- return;
-
- rcu_read_lock();
- list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
- if (local && (ipvlan == in_dev))
- continue;
+ __skb_queue_head_init(&list);
- mac_hash = ipvlan_mac_hash(eth->h_dest);
- if (!test_bit(mac_hash, ipvlan->mac_filters))
- continue;
+ spin_lock_bh(&port->backlog.lock);
+ skb_queue_splice_tail_init(&port->backlog, &list);
+ spin_unlock_bh(&port->backlog.lock);
- ret = NET_RX_DROP;
- len = skb->len + ETH_HLEN;
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- goto mcast_acct;
+ while ((skb = __skb_dequeue(&list)) != NULL) {
+ ethh = eth_hdr(skb);
+ hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
+ mac_hash = ipvlan_mac_hash(ethh->h_dest);
- if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
+ if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
+ pkt_type = PACKET_BROADCAST;
else
- nskb->pkt_type = PACKET_MULTICAST;
-
- nskb->dev = ipvlan->dev;
- if (local)
- ret = dev_forward_skb(ipvlan->dev, nskb);
- else
- ret = netif_rx(nskb);
-mcast_acct:
- ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
- }
- rcu_read_unlock();
-
- /* Locally generated? ...Forward a copy to the main-device as
- * well. On the RX side we'll ignore it (wont give it to any
- * of the virtual devices.
- */
- if (local) {
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (nskb) {
- if (ether_addr_equal(eth->h_dest, port->dev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
+ pkt_type = PACKET_MULTICAST;
+
+ dlocal = false;
+ rcu_read_lock();
+ list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
+ if (hlocal && (ipvlan->dev == skb->dev)) {
+ dlocal = true;
+ continue;
+ }
+ if (!test_bit(mac_hash, ipvlan->mac_filters))
+ continue;
+
+ ret = NET_RX_DROP;
+ len = skb->len + ETH_HLEN;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto acct;
+
+ nskb->pkt_type = pkt_type;
+ nskb->dev = ipvlan->dev;
+ if (hlocal)
+ ret = dev_forward_skb(ipvlan->dev, nskb);
else
- nskb->pkt_type = PACKET_MULTICAST;
-
- dev_forward_skb(port->dev, nskb);
+ ret = netif_rx(nskb);
+acct:
+ ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+ }
+ rcu_read_unlock();
+
+ if (dlocal) {
+ /* If the packet originated here, send it out. */
+ skb->dev = port->dev;
+ skb->pkt_type = pkt_type;
+ dev_queue_xmit(skb);
+ } else {
+ kfree_skb(skb);
}
}
}
@@ -446,6 +453,26 @@ out:
return ret;
}
+static void ipvlan_multicast_enqueue(struct ipvl_port *port,
+ struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_PAUSE)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ spin_lock(&port->backlog.lock);
+ if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
+ __skb_queue_tail(&port->backlog, skb);
+ spin_unlock(&port->backlog.lock);
+ schedule_work(&port->wq);
+ } else {
+ spin_unlock(&port->backlog.lock);
+ atomic_long_inc(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+ }
+}
+
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -493,11 +520,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
- u8 ip_summed = skb->ip_summed;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true);
- skb->ip_summed = ip_summed;
+ ipvlan_multicast_enqueue(ipvlan->port, skb);
+ return NET_XMIT_SUCCESS;
}
skb->dev = ipvlan->phy_dev;
@@ -581,8 +605,18 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
int addr_type;
if (is_multicast_ether_addr(eth->h_dest)) {
- if (ipvlan_external_frame(skb, port))
- ipvlan_multicast_frame(port, skb, NULL, false);
+ if (ipvlan_external_frame(skb, port)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ /* External frames are queued for device-local
+ * distribution, but a copy is handed to the master
+ * straight away to avoid sending duplicates later
+ * when the work-queue processes this frame. This is
+ * achieved by returning RX_HANDLER_PASS.
+ */
+ if (nskb)
+ ipvlan_multicast_enqueue(port, nskb);
+ }
} else {
struct ipvl_addr *addr;
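The shape of the deferral above — a bounded backlog filled on the fast path, drained by a work item that splices the whole queue out under one short critical section — is worth seeing in one piece. A condensed sketch with illustrative names (the real code additionally clones one skb per slave and accounts drops):

#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct demo_port {			/* illustrative stand-in for ipvl_port */
	struct sk_buff_head backlog;
	struct work_struct wq;
};

static void demo_enqueue(struct demo_port *port, struct sk_buff *skb)
{
	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);	/* no re-queue if already pending */
	} else {
		spin_unlock(&port->backlog.lock);
		kfree_skb(skb);			/* drop rather than block */
	}
}

static void demo_work(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port, wq);
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	/* One short critical section, then process with no lock held. */
	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL)
		consume_skb(skb);	/* deliver copies here in real code */
}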
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 77b92a0fe557..1acc283160d9 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -54,6 +54,9 @@ static int ipvlan_port_create(struct net_device *dev)
for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
INIT_HLIST_HEAD(&port->hlhead[idx]);
+ skb_queue_head_init(&port->backlog);
+ INIT_WORK(&port->wq, ipvlan_process_multicast);
+
err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
if (err)
goto err;
@@ -72,6 +75,8 @@ static void ipvlan_port_destroy(struct net_device *dev)
dev->priv_flags &= ~IFF_IPVLAN_MASTER;
netdev_rx_handler_unregister(dev);
+ cancel_work_sync(&port->wq);
+ __skb_queue_purge(&port->backlog);
kfree_rcu(port, rcu);
}
@@ -213,17 +218,6 @@ static void ipvlan_change_rx_flags(struct net_device *dev, int change)
dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI? 1 : -1);
}
-static void ipvlan_set_broadcast_mac_filter(struct ipvl_dev *ipvlan, bool set)
-{
- struct net_device *dev = ipvlan->dev;
- unsigned int hashbit = ipvlan_mac_hash(dev->broadcast);
-
- if (set && !test_bit(hashbit, ipvlan->mac_filters))
- __set_bit(hashbit, ipvlan->mac_filters);
- else if (!set && test_bit(hashbit, ipvlan->mac_filters))
- __clear_bit(hashbit, ipvlan->mac_filters);
-}
-
static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -238,6 +232,12 @@ static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev)
__set_bit(ipvlan_mac_hash(ha->addr), mc_filters);
+ /* Turn on the broadcast bit irrespective of address family;
+ * since broadcast delivery is deferred to the work-queue, it has
+ * no impact on fast-path processing.
+ */
+ __set_bit(ipvlan_mac_hash(dev->broadcast), mc_filters);
+
bitmap_copy(ipvlan->mac_filters, mc_filters,
IPVLAN_MAC_FILTER_SIZE);
}
@@ -705,7 +705,6 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
*/
if (netif_running(ipvlan->dev))
ipvlan_ht_addr_add(ipvlan, addr);
- ipvlan_set_broadcast_mac_filter(ipvlan, true);
return 0;
}
@@ -722,8 +721,6 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
list_del(&addr->anode);
ipvlan->ipv4cnt--;
WARN_ON(ipvlan->ipv4cnt < 0);
- if (!ipvlan->ipv4cnt)
- ipvlan_set_broadcast_mac_filter(ipvlan, false);
kfree_rcu(addr, rcu);
return;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index f6c916312577..25f21968fa5c 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -848,7 +848,9 @@ static void irda_usb_receive(struct urb *urb)
* Jean II */
self->rx_defer_timer.function = irda_usb_rx_defer_expired;
self->rx_defer_timer.data = (unsigned long) urb;
- mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
+ mod_timer(&self->rx_defer_timer,
+ jiffies + msecs_to_jiffies(10));
+
return;
}
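msecs_to_jiffies() rounds up, so the timeout can never truncate to zero ticks the way `10 * HZ / 1000` can once HZ drops below 100, and expressing the delay in milliseconds self-documents. The general pattern (illustrative helper):

static void demo_arm_timer(struct timer_list *t, unsigned int ms)
{
	/* Rounds up: with HZ=250, msecs_to_jiffies(10) == 3 ticks (12 ms),
	 * whereas 10 * HZ / 1000 truncates to 2 ticks (8 ms).
	 */
	mod_timer(t, jiffies + msecs_to_jiffies(ms));
}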
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 8c350c5d54ad..483afb19596c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -476,7 +476,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
err = -ENOMEM;
q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
- &macvtap_proto);
+ &macvtap_proto, 0);
if (!q)
goto out;
@@ -1006,6 +1006,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
unsigned int __user *up = argp;
unsigned short u;
int __user *sp = argp;
+ struct sockaddr sa;
int s;
int ret;
@@ -1101,6 +1102,37 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
rtnl_unlock();
return ret;
+ case SIOCGIFHWADDR:
+ rtnl_lock();
+ vlan = macvtap_get_vlan(q);
+ if (!vlan) {
+ rtnl_unlock();
+ return -ENOLINK;
+ }
+ ret = 0;
+ u = vlan->dev->type;
+ if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
+ copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
+ put_user(u, &ifr->ifr_hwaddr.sa_family))
+ ret = -EFAULT;
+ macvtap_put_vlan(vlan);
+ rtnl_unlock();
+ return ret;
+
+ case SIOCSIFHWADDR:
+ if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
+ return -EFAULT;
+ rtnl_lock();
+ vlan = macvtap_get_vlan(q);
+ if (!vlan) {
+ rtnl_unlock();
+ return -ENOLINK;
+ }
+ ret = dev_set_mac_address(vlan->dev, &sa);
+ macvtap_put_vlan(vlan);
+ rtnl_unlock();
+ return ret;
+
default:
return -EINVAL;
}
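From userspace the new cases behave like their tun/tap counterparts, but on a macvtap queue fd. A minimal sketch querying the MAC — the device node path /dev/tap5 is hypothetical, since the node name depends on the interface index:

#include <fcntl.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/tap5", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	if (ioctl(fd, SIOCGIFHWADDR, &ifr) == 0)	/* fills name, type, MAC */
		printf("%s: first octet %02x, type %d\n", ifr.ifr_name,
		       (unsigned char)ifr.ifr_hwaddr.sa_data[0],
		       ifr.ifr_hwaddr.sa_family);
	close(fd);
	return 0;
}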
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 70641d2c0429..cf18940f4e84 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -24,13 +24,6 @@ config AMD_PHY
---help---
Currently supports the am79c874
-config AMD_XGBE_PHY
- tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
- depends on (OF || ACPI) && HAS_IOMEM
- depends on ARM64 || COMPILE_TEST
- ---help---
- Currently supports the AMD 10GbE PHY
-
config MARVELL_PHY
tristate "Drivers for Marvell PHYs"
---help---
@@ -119,6 +112,11 @@ config MICREL_PHY
---help---
Supports the KSZ9021, VSC8201, KS8001 PHYs.
+config DP83867_PHY
+ tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
+ ---help---
+ Currently supports the DP83867 PHY.
+
config FIXED_PHY
tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
@@ -212,7 +210,6 @@ config MDIO_BCM_UNIMAC
This hardware can be found in the Broadcom GENET Ethernet MAC
controllers as well as some Broadcom Ethernet switches such as the
Starfighter 2 switches.
-
endif # PHYLIB
config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 501ea7699a2d..fcc25a0c45cd 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
+obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
@@ -33,5 +34,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
-obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
deleted file mode 100644
index 34a75cba3b73..000000000000
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ /dev/null
@@ -1,1901 +0,0 @@
-/*
- * AMD 10Gb Ethernet PHY driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/workqueue.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/phy.h>
-#include <linux/mdio.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/property.h>
-#include <linux/acpi.h>
-#include <linux/jiffies.h>
-
-MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION("1.0.0-a");
-MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
-
-#define XGBE_PHY_ID 0x000162d0
-#define XGBE_PHY_MASK 0xfffffff0
-
-#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"
-#define XGBE_PHY_BLWC_PROPERTY "amd,serdes-blwc"
-#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
-#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
-#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
-#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
-#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
-
-#define XGBE_PHY_SPEEDS 3
-#define XGBE_PHY_SPEED_1000 0
-#define XGBE_PHY_SPEED_2500 1
-#define XGBE_PHY_SPEED_10000 2
-
-#define XGBE_AN_MS_TIMEOUT 500
-
-#define XGBE_AN_INT_CMPLT 0x01
-#define XGBE_AN_INC_LINK 0x02
-#define XGBE_AN_PG_RCV 0x04
-#define XGBE_AN_INT_MASK 0x07
-
-#define XNP_MCF_NULL_MESSAGE 0x001
-#define XNP_ACK_PROCESSED BIT(12)
-#define XNP_MP_FORMATTED BIT(13)
-#define XNP_NP_EXCHANGE BIT(15)
-
-#define XGBE_PHY_RATECHANGE_COUNT 500
-
-#define XGBE_PHY_KR_TRAINING_START 0x01
-#define XGBE_PHY_KR_TRAINING_ENABLE 0x02
-
-#define XGBE_PHY_FEC_ENABLE 0x01
-#define XGBE_PHY_FEC_FORWARD 0x02
-#define XGBE_PHY_FEC_MASK 0x03
-
-#ifndef MDIO_PMA_10GBR_PMD_CTRL
-#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
-#endif
-
-#ifndef MDIO_PMA_10GBR_FEC_ABILITY
-#define MDIO_PMA_10GBR_FEC_ABILITY 0x00aa
-#endif
-
-#ifndef MDIO_PMA_10GBR_FEC_CTRL
-#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
-#endif
-
-#ifndef MDIO_AN_XNP
-#define MDIO_AN_XNP 0x0016
-#endif
-
-#ifndef MDIO_AN_LPX
-#define MDIO_AN_LPX 0x0019
-#endif
-
-#ifndef MDIO_AN_INTMASK
-#define MDIO_AN_INTMASK 0x8001
-#endif
-
-#ifndef MDIO_AN_INT
-#define MDIO_AN_INT 0x8002
-#endif
-
-#ifndef MDIO_CTRL1_SPEED1G
-#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
-#endif
-
-/* SerDes integration register offsets */
-#define SIR0_KR_RT_1 0x002c
-#define SIR0_STATUS 0x0040
-#define SIR1_SPEED 0x0000
-
-/* SerDes integration register entry bit positions and sizes */
-#define SIR0_KR_RT_1_RESET_INDEX 11
-#define SIR0_KR_RT_1_RESET_WIDTH 1
-#define SIR0_STATUS_RX_READY_INDEX 0
-#define SIR0_STATUS_RX_READY_WIDTH 1
-#define SIR0_STATUS_TX_READY_INDEX 8
-#define SIR0_STATUS_TX_READY_WIDTH 1
-#define SIR1_SPEED_CDR_RATE_INDEX 12
-#define SIR1_SPEED_CDR_RATE_WIDTH 4
-#define SIR1_SPEED_DATARATE_INDEX 4
-#define SIR1_SPEED_DATARATE_WIDTH 2
-#define SIR1_SPEED_PLLSEL_INDEX 3
-#define SIR1_SPEED_PLLSEL_WIDTH 1
-#define SIR1_SPEED_RATECHANGE_INDEX 6
-#define SIR1_SPEED_RATECHANGE_WIDTH 1
-#define SIR1_SPEED_TXAMP_INDEX 8
-#define SIR1_SPEED_TXAMP_WIDTH 4
-#define SIR1_SPEED_WORDMODE_INDEX 0
-#define SIR1_SPEED_WORDMODE_WIDTH 3
-
-#define SPEED_10000_BLWC 0
-#define SPEED_10000_CDR 0x7
-#define SPEED_10000_PLL 0x1
-#define SPEED_10000_PQ 0x12
-#define SPEED_10000_RATE 0x0
-#define SPEED_10000_TXAMP 0xa
-#define SPEED_10000_WORD 0x7
-#define SPEED_10000_DFE_TAP_CONFIG 0x1
-#define SPEED_10000_DFE_TAP_ENABLE 0x7f
-
-#define SPEED_2500_BLWC 1
-#define SPEED_2500_CDR 0x2
-#define SPEED_2500_PLL 0x0
-#define SPEED_2500_PQ 0xa
-#define SPEED_2500_RATE 0x1
-#define SPEED_2500_TXAMP 0xf
-#define SPEED_2500_WORD 0x1
-#define SPEED_2500_DFE_TAP_CONFIG 0x3
-#define SPEED_2500_DFE_TAP_ENABLE 0x0
-
-#define SPEED_1000_BLWC 1
-#define SPEED_1000_CDR 0x2
-#define SPEED_1000_PLL 0x0
-#define SPEED_1000_PQ 0xa
-#define SPEED_1000_RATE 0x3
-#define SPEED_1000_TXAMP 0xf
-#define SPEED_1000_WORD 0x1
-#define SPEED_1000_DFE_TAP_CONFIG 0x3
-#define SPEED_1000_DFE_TAP_ENABLE 0x0
-
-/* SerDes RxTx register offsets */
-#define RXTX_REG6 0x0018
-#define RXTX_REG20 0x0050
-#define RXTX_REG22 0x0058
-#define RXTX_REG114 0x01c8
-#define RXTX_REG129 0x0204
-
-/* SerDes RxTx register entry bit positions and sizes */
-#define RXTX_REG6_RESETB_RXD_INDEX 8
-#define RXTX_REG6_RESETB_RXD_WIDTH 1
-#define RXTX_REG20_BLWC_ENA_INDEX 2
-#define RXTX_REG20_BLWC_ENA_WIDTH 1
-#define RXTX_REG114_PQ_REG_INDEX 9
-#define RXTX_REG114_PQ_REG_WIDTH 7
-#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
-#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
-
-/* Bit setting and getting macros
- * The get macro will extract the current bit field value from within
- * the variable
- *
- * The set macro will clear the current bit field value within the
- * variable and then set the bit field of the variable to the
- * specified value
- */
-#define GET_BITS(_var, _index, _width) \
- (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
-
-#define SET_BITS(_var, _index, _width, _val) \
-do { \
- (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
- (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
-} while (0)
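Before the mask-and-shift pair above disappears with the rest of this file, one worked example (values chosen purely for illustration):

u16 v = 0x30;			/* bits 5:4 hold the field value 0x3 */

/* GET_BITS(0x30, 4, 2) == (0x30 >> 4) & 0x3 == 0x3 */
SET_BITS(v, 4, 2, 0x1);		/* clears bits 5:4, writes 0x1: v == 0x10 */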
-
-#define XSIR_GET_BITS(_var, _prefix, _field) \
- GET_BITS((_var), \
- _prefix##_##_field##_INDEX, \
- _prefix##_##_field##_WIDTH)
-
-#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
- SET_BITS((_var), \
- _prefix##_##_field##_INDEX, \
- _prefix##_##_field##_WIDTH, (_val))
-
-/* Macros for reading or writing SerDes integration registers
- * The ioread macros will get bit fields or full values using the
- * register definitions formed using the input names
- *
- * The iowrite macros will set bit fields or full values using the
- * register definitions formed using the input names
- */
-#define XSIR0_IOREAD(_priv, _reg) \
- ioread16((_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \
- GET_BITS(XSIR0_IOREAD((_priv), _reg), \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH)
-
-#define XSIR0_IOWRITE(_priv, _reg, _val) \
- iowrite16((_val), (_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val) \
-do { \
- u16 reg_val = XSIR0_IOREAD((_priv), _reg); \
- SET_BITS(reg_val, \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH, (_val)); \
- XSIR0_IOWRITE((_priv), _reg, reg_val); \
-} while (0)
-
-#define XSIR1_IOREAD(_priv, _reg) \
- ioread16((_priv)->sir1_regs + _reg)
-
-#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \
- GET_BITS(XSIR1_IOREAD((_priv), _reg), \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH)
-
-#define XSIR1_IOWRITE(_priv, _reg, _val) \
- iowrite16((_val), (_priv)->sir1_regs + _reg)
-
-#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \
-do { \
- u16 reg_val = XSIR1_IOREAD((_priv), _reg); \
- SET_BITS(reg_val, \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH, (_val)); \
- XSIR1_IOWRITE((_priv), _reg, reg_val); \
-} while (0)
-
-/* Macros for reading or writing SerDes RxTx registers
- * The ioread macros will get bit fields or full values using the
- * register definitions formed using the input names
- *
- * The iowrite macros will set bit fields or full values using the
- * register definitions formed using the input names
- */
-#define XRXTX_IOREAD(_priv, _reg) \
- ioread16((_priv)->rxtx_regs + _reg)
-
-#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \
- GET_BITS(XRXTX_IOREAD((_priv), _reg), \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH)
-
-#define XRXTX_IOWRITE(_priv, _reg, _val) \
- iowrite16((_val), (_priv)->rxtx_regs + _reg)
-
-#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \
-do { \
- u16 reg_val = XRXTX_IOREAD((_priv), _reg); \
- SET_BITS(reg_val, \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH, (_val)); \
- XRXTX_IOWRITE((_priv), _reg, reg_val); \
-} while (0)
-
-static const u32 amd_xgbe_phy_serdes_blwc[] = {
- SPEED_1000_BLWC,
- SPEED_2500_BLWC,
- SPEED_10000_BLWC,
-};
-
-static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
- SPEED_1000_CDR,
- SPEED_2500_CDR,
- SPEED_10000_CDR,
-};
-
-static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
- SPEED_1000_PQ,
- SPEED_2500_PQ,
- SPEED_10000_PQ,
-};
-
-static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
- SPEED_1000_TXAMP,
- SPEED_2500_TXAMP,
- SPEED_10000_TXAMP,
-};
-
-static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
- SPEED_1000_DFE_TAP_CONFIG,
- SPEED_2500_DFE_TAP_CONFIG,
- SPEED_10000_DFE_TAP_CONFIG,
-};
-
-static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
- SPEED_1000_DFE_TAP_ENABLE,
- SPEED_2500_DFE_TAP_ENABLE,
- SPEED_10000_DFE_TAP_ENABLE,
-};
-
-enum amd_xgbe_phy_an {
- AMD_XGBE_AN_READY = 0,
- AMD_XGBE_AN_PAGE_RECEIVED,
- AMD_XGBE_AN_INCOMPAT_LINK,
- AMD_XGBE_AN_COMPLETE,
- AMD_XGBE_AN_NO_LINK,
- AMD_XGBE_AN_ERROR,
-};
-
-enum amd_xgbe_phy_rx {
- AMD_XGBE_RX_BPA = 0,
- AMD_XGBE_RX_XNP,
- AMD_XGBE_RX_COMPLETE,
- AMD_XGBE_RX_ERROR,
-};
-
-enum amd_xgbe_phy_mode {
- AMD_XGBE_MODE_KR,
- AMD_XGBE_MODE_KX,
-};
-
-enum amd_xgbe_phy_speedset {
- AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
- AMD_XGBE_PHY_SPEEDSET_2500_10000,
-};
-
-struct amd_xgbe_phy_priv {
- struct platform_device *pdev;
- struct acpi_device *adev;
- struct device *dev;
-
- struct phy_device *phydev;
-
- /* SerDes related mmio resources */
- struct resource *rxtx_res;
- struct resource *sir0_res;
- struct resource *sir1_res;
-
- /* SerDes related mmio registers */
- void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
- void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
- void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
-
- int an_irq;
- char an_irq_name[IFNAMSIZ + 32];
- struct work_struct an_irq_work;
- unsigned int an_irq_allocated;
-
- unsigned int speed_set;
-
- /* SerDes UEFI configurable settings.
- * Switching between modes/speeds requires new values for some
- * SerDes settings. The values can be supplied as device
- * properties in array format. The first array entry is for
- * 1GbE, second for 2.5GbE and third for 10GbE
- */
- u32 serdes_blwc[XGBE_PHY_SPEEDS];
- u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
- u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
- u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
- u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
- u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
-
- /* Auto-negotiation state machine support */
- struct mutex an_mutex;
- enum amd_xgbe_phy_an an_result;
- enum amd_xgbe_phy_an an_state;
- enum amd_xgbe_phy_rx kr_state;
- enum amd_xgbe_phy_rx kx_state;
- struct work_struct an_work;
- struct workqueue_struct *an_workqueue;
- unsigned int an_supported;
- unsigned int parallel_detect;
- unsigned int fec_ability;
- unsigned long an_start;
-
- unsigned int lpm_ctrl; /* CTRL1 for resume */
-};
-
-static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- if (ret < 0)
- return ret;
-
- ret |= XGBE_PHY_KR_TRAINING_ENABLE;
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
-
- return 0;
-}
-
-static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- if (ret < 0)
- return ret;
-
- ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
-
- return 0;
-}
-
-static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret |= MDIO_CTRL1_LPOWER;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- usleep_range(75, 100);
-
- ret &= ~MDIO_CTRL1_LPOWER;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- return 0;
-}
-
-static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
-
- /* Assert Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
-}
-
-static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- unsigned int wait;
- u16 status;
-
- /* Release Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
-
- /* Wait for Rx and Tx ready */
- wait = XGBE_PHY_RATECHANGE_COUNT;
- while (wait--) {
- usleep_range(50, 75);
-
- status = XSIR0_IOREAD(priv, SIR0_STATUS);
- if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
- XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
- goto rx_reset;
- }
-
- netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
- status);
-
-rx_reset:
- /* Perform Rx reset for the DFE changes */
- XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
-}
-
-static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* Enable KR training */
- ret = amd_xgbe_an_enable_kr_training(phydev);
- if (ret < 0)
- return ret;
-
- /* Set PCS to KR/10G speed */
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_PCS_CTRL2_TYPE;
- ret |= MDIO_PCS_CTRL2_10GBR;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_CTRL1_SPEEDSEL;
- ret |= MDIO_CTRL1_SPEED10G;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- ret = amd_xgbe_phy_pcs_power_cycle(phydev);
- if (ret < 0)
- return ret;
-
- /* Set SerDes to 10G speed */
- amd_xgbe_phy_serdes_start_ratechange(phydev);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
- priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
- priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
- priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
- priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
- priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
- XRXTX_IOWRITE(priv, RXTX_REG22,
- priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
-
- amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
- return 0;
-}
-
-static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* Disable KR training */
- ret = amd_xgbe_an_disable_kr_training(phydev);
- if (ret < 0)
- return ret;
-
- /* Set PCS to KX/1G speed */
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_PCS_CTRL2_TYPE;
- ret |= MDIO_PCS_CTRL2_10GBX;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_CTRL1_SPEEDSEL;
- ret |= MDIO_CTRL1_SPEED1G;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- ret = amd_xgbe_phy_pcs_power_cycle(phydev);
- if (ret < 0)
- return ret;
-
- /* Set SerDes to 2.5G speed */
- amd_xgbe_phy_serdes_start_ratechange(phydev);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
- priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
- priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
- priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
- priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
- priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
- XRXTX_IOWRITE(priv, RXTX_REG22,
- priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
-
- amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
- return 0;
-}
-
-static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* Disable KR training */
- ret = amd_xgbe_an_disable_kr_training(phydev);
- if (ret < 0)
- return ret;
-
- /* Set PCS to KX/1G speed */
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_PCS_CTRL2_TYPE;
- ret |= MDIO_PCS_CTRL2_10GBX;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_CTRL1_SPEEDSEL;
- ret |= MDIO_CTRL1_SPEED1G;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- ret = amd_xgbe_phy_pcs_power_cycle(phydev);
- if (ret < 0)
- return ret;
-
- /* Set SerDes to 1G speed */
- amd_xgbe_phy_serdes_start_ratechange(phydev);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
- priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
- priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
- priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
- priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
- priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
- XRXTX_IOWRITE(priv, RXTX_REG22,
- priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
-
- amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
- return 0;
-}
-
-static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
- enum amd_xgbe_phy_mode *mode)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
- if (ret < 0)
- return ret;
-
- if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
- *mode = AMD_XGBE_MODE_KR;
- else
- *mode = AMD_XGBE_MODE_KX;
-
- return 0;
-}
-
-static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
-{
- enum amd_xgbe_phy_mode mode;
-
- if (amd_xgbe_phy_cur_mode(phydev, &mode))
- return false;
-
- return (mode == AMD_XGBE_MODE_KR);
-}
-
-static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* If we are in KR switch to KX, and vice-versa */
- if (amd_xgbe_phy_in_kr_mode(phydev)) {
- if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
- ret = amd_xgbe_phy_gmii_mode(phydev);
- else
- ret = amd_xgbe_phy_gmii_2500_mode(phydev);
- } else {
- ret = amd_xgbe_phy_xgmii_mode(phydev);
- }
-
- return ret;
-}
-
-static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
- enum amd_xgbe_phy_mode mode)
-{
- enum amd_xgbe_phy_mode cur_mode;
- int ret;
-
- ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
- if (ret)
- return ret;
-
- if (mode != cur_mode)
- ret = amd_xgbe_phy_switch_mode(phydev);
-
- return ret;
-}
-
-static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
-{
- if (phydev->autoneg == AUTONEG_ENABLE) {
- if (phydev->advertising & ADVERTISED_10000baseKR_Full)
- return true;
- } else {
- if (phydev->speed == SPEED_10000)
- return true;
- }
-
- return false;
-}
-
-static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
-{
- if (phydev->autoneg == AUTONEG_ENABLE) {
- if (phydev->advertising & ADVERTISED_2500baseX_Full)
- return true;
- } else {
- if (phydev->speed == SPEED_2500)
- return true;
- }
-
- return false;
-}
-
-static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
-{
- if (phydev->autoneg == AUTONEG_ENABLE) {
- if (phydev->advertising & ADVERTISED_1000baseKX_Full)
- return true;
- } else {
- if (phydev->speed == SPEED_1000)
- return true;
- }
-
- return false;
-}
-
-static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
- bool restart)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_AN_CTRL1_ENABLE;
-
- if (enable)
- ret |= MDIO_AN_CTRL1_ENABLE;
-
- if (restart)
- ret |= MDIO_AN_CTRL1_RESTART;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
-
- return 0;
-}
-
-static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
-{
- return amd_xgbe_phy_set_an(phydev, true, true);
-}
-
-static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
-{
- return amd_xgbe_phy_set_an(phydev, false, false);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
- enum amd_xgbe_phy_rx *state)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ad_reg, lp_reg, ret;
-
- *state = AMD_XGBE_RX_COMPLETE;
-
- /* If we're not in KR mode then we're done */
- if (!amd_xgbe_phy_in_kr_mode(phydev))
- return AMD_XGBE_AN_PAGE_RECEIVED;
-
- /* Enable/Disable FEC */
- ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- if (ad_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
- if (lp_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
-
- ret &= ~XGBE_PHY_FEC_MASK;
- if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
- ret |= priv->fec_ability;
-
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
-
- /* Start KR training */
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
-
- if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
-
- ret |= XGBE_PHY_KR_TRAINING_START;
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
- ret);
-
- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
- }
-
- return AMD_XGBE_AN_PAGE_RECEIVED;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
- enum amd_xgbe_phy_rx *state)
-{
- u16 msg;
-
- *state = AMD_XGBE_RX_XNP;
-
- msg = XNP_MCF_NULL_MESSAGE;
- msg |= XNP_MP_FORMATTED;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
-
- return AMD_XGBE_AN_PAGE_RECEIVED;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
- enum amd_xgbe_phy_rx *state)
-{
- unsigned int link_support;
- int ret, ad_reg, lp_reg;
-
- /* Read Base Ability register 2 first */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
-
- /* Check for a supported mode, otherwise restart in a different one */
- link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
- if (!(ret & link_support))
- return AMD_XGBE_AN_INCOMPAT_LINK;
-
- /* Check Extended Next Page support */
- ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- if (ad_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
- if (lp_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
- amd_xgbe_an_tx_xnp(phydev, state) :
- amd_xgbe_an_tx_training(phydev, state);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
- enum amd_xgbe_phy_rx *state)
-{
- int ad_reg, lp_reg;
-
- /* Check Extended Next Page support */
- ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
- if (ad_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
- if (lp_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
- amd_xgbe_an_tx_xnp(phydev, state) :
- amd_xgbe_an_tx_training(phydev, state);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- enum amd_xgbe_phy_rx *state;
- unsigned long an_timeout;
- int ret;
-
- if (!priv->an_start) {
- priv->an_start = jiffies;
- } else {
- an_timeout = priv->an_start +
- msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
- if (time_after(jiffies, an_timeout)) {
- /* Auto-negotiation timed out, reset state */
- priv->kr_state = AMD_XGBE_RX_BPA;
- priv->kx_state = AMD_XGBE_RX_BPA;
-
- priv->an_start = jiffies;
- }
- }
-
- state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
- : &priv->kx_state;
-
- switch (*state) {
- case AMD_XGBE_RX_BPA:
- ret = amd_xgbe_an_rx_bpa(phydev, state);
- break;
-
- case AMD_XGBE_RX_XNP:
- ret = amd_xgbe_an_rx_xnp(phydev, state);
- break;
-
- default:
- ret = AMD_XGBE_AN_ERROR;
- }
-
- return ret;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* Be sure we aren't looping trying to negotiate */
- if (amd_xgbe_phy_in_kr_mode(phydev)) {
- priv->kr_state = AMD_XGBE_RX_ERROR;
-
- if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
- !(phydev->advertising & SUPPORTED_2500baseX_Full))
- return AMD_XGBE_AN_NO_LINK;
-
- if (priv->kx_state != AMD_XGBE_RX_BPA)
- return AMD_XGBE_AN_NO_LINK;
- } else {
- priv->kx_state = AMD_XGBE_RX_ERROR;
-
- if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
- return AMD_XGBE_AN_NO_LINK;
-
- if (priv->kr_state != AMD_XGBE_RX_BPA)
- return AMD_XGBE_AN_NO_LINK;
- }
-
- ret = amd_xgbe_phy_disable_an(phydev);
- if (ret)
- return AMD_XGBE_AN_ERROR;
-
- ret = amd_xgbe_phy_switch_mode(phydev);
- if (ret)
- return AMD_XGBE_AN_ERROR;
-
- ret = amd_xgbe_phy_restart_an(phydev);
- if (ret)
- return AMD_XGBE_AN_ERROR;
-
- return AMD_XGBE_AN_INCOMPAT_LINK;
-}
-
-static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
-{
- struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
-
- /* Interrupt reason must be read and cleared outside of IRQ context */
- disable_irq_nosync(priv->an_irq);
-
- queue_work(priv->an_workqueue, &priv->an_irq_work);
-
- return IRQ_HANDLED;
-}
-
-static void amd_xgbe_an_irq_work(struct work_struct *work)
-{
- struct amd_xgbe_phy_priv *priv = container_of(work,
- struct amd_xgbe_phy_priv,
- an_irq_work);
-
- /* Avoid a race between enabling the IRQ and exiting the work by
- * waiting for the work to finish and then queueing it
- */
- flush_work(&priv->an_work);
- queue_work(priv->an_workqueue, &priv->an_work);
-}
-
-static void amd_xgbe_an_state_machine(struct work_struct *work)
-{
- struct amd_xgbe_phy_priv *priv = container_of(work,
- struct amd_xgbe_phy_priv,
- an_work);
- struct phy_device *phydev = priv->phydev;
- enum amd_xgbe_phy_an cur_state = priv->an_state;
- int int_reg, int_mask;
-
- mutex_lock(&priv->an_mutex);
-
- /* Read the interrupt */
- int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
- if (!int_reg)
- goto out;
-
-next_int:
- if (int_reg < 0) {
- priv->an_state = AMD_XGBE_AN_ERROR;
- int_mask = XGBE_AN_INT_MASK;
- } else if (int_reg & XGBE_AN_PG_RCV) {
- priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
- int_mask = XGBE_AN_PG_RCV;
- } else if (int_reg & XGBE_AN_INC_LINK) {
- priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
- int_mask = XGBE_AN_INC_LINK;
- } else if (int_reg & XGBE_AN_INT_CMPLT) {
- priv->an_state = AMD_XGBE_AN_COMPLETE;
- int_mask = XGBE_AN_INT_CMPLT;
- } else {
- priv->an_state = AMD_XGBE_AN_ERROR;
- int_mask = 0;
- }
-
- /* Clear the interrupt to be processed */
- int_reg &= ~int_mask;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
-
- priv->an_result = priv->an_state;
-
-again:
- cur_state = priv->an_state;
-
- switch (priv->an_state) {
- case AMD_XGBE_AN_READY:
- priv->an_supported = 0;
- break;
-
- case AMD_XGBE_AN_PAGE_RECEIVED:
- priv->an_state = amd_xgbe_an_page_received(phydev);
- priv->an_supported++;
- break;
-
- case AMD_XGBE_AN_INCOMPAT_LINK:
- priv->an_supported = 0;
- priv->parallel_detect = 0;
- priv->an_state = amd_xgbe_an_incompat_link(phydev);
- break;
-
- case AMD_XGBE_AN_COMPLETE:
- priv->parallel_detect = priv->an_supported ? 0 : 1;
- netdev_dbg(phydev->attached_dev, "%s successful\n",
- priv->an_supported ? "Auto negotiation"
- : "Parallel detection");
- break;
-
- case AMD_XGBE_AN_NO_LINK:
- break;
-
- default:
- priv->an_state = AMD_XGBE_AN_ERROR;
- }
-
- if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
- int_reg = 0;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
- } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
- netdev_err(phydev->attached_dev,
- "error during auto-negotiation, state=%u\n",
- cur_state);
-
- int_reg = 0;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
- }
-
- if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
- priv->an_result = priv->an_state;
- priv->an_state = AMD_XGBE_AN_READY;
- priv->kr_state = AMD_XGBE_RX_BPA;
- priv->kx_state = AMD_XGBE_RX_BPA;
- priv->an_start = 0;
- }
-
- if (cur_state != priv->an_state)
- goto again;
-
- if (int_reg)
- goto next_int;
-
-out:
- enable_irq(priv->an_irq);
-
- mutex_unlock(&priv->an_mutex);
-}
-
-static int amd_xgbe_an_init(struct phy_device *phydev)
-{
- int ret;
-
- /* Set up Advertisement register 3 first */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- if (ret < 0)
- return ret;
-
- if (phydev->advertising & SUPPORTED_10000baseR_FEC)
- ret |= 0xc000;
- else
- ret &= ~0xc000;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
-
- /* Set up Advertisement register 2 next */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
- if (ret < 0)
- return ret;
-
- if (phydev->advertising & SUPPORTED_10000baseKR_Full)
- ret |= 0x80;
- else
- ret &= ~0x80;
-
- if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
- (phydev->advertising & SUPPORTED_2500baseX_Full))
- ret |= 0x20;
- else
- ret &= ~0x20;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
-
- /* Set up Advertisement register 1 last */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- if (ret < 0)
- return ret;
-
- if (phydev->advertising & SUPPORTED_Pause)
- ret |= 0x400;
- else
- ret &= ~0x400;
-
- if (phydev->advertising & SUPPORTED_Asym_Pause)
- ret |= 0x800;
- else
- ret &= ~0x800;
-
- /* We don't intend to perform XNP */
- ret &= ~XNP_NP_EXCHANGE;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
-
- return 0;
-}
-
-static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
-{
- int count, ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret |= MDIO_CTRL1_RESET;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- count = 50;
- do {
- msleep(20);
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
- } while ((ret & MDIO_CTRL1_RESET) && --count);
-
- if (ret & MDIO_CTRL1_RESET)
- return -ETIMEDOUT;
-
- /* Disable auto-negotiation for now */
- ret = amd_xgbe_phy_disable_an(phydev);
- if (ret < 0)
- return ret;
-
- /* Clear auto-negotiation interrupts */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-
- return 0;
-}
-
-static int amd_xgbe_phy_config_init(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- struct net_device *netdev = phydev->attached_dev;
- int ret;
-
- if (!priv->an_irq_allocated) {
- /* Allocate the auto-negotiation workqueue and interrupt */
- snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
- "%s-pcs", netdev_name(netdev));
-
- priv->an_workqueue =
- create_singlethread_workqueue(priv->an_irq_name);
- if (!priv->an_workqueue) {
- netdev_err(netdev, "phy workqueue creation failed\n");
- return -ENOMEM;
- }
-
- ret = devm_request_irq(priv->dev, priv->an_irq,
- amd_xgbe_an_isr, 0, priv->an_irq_name,
- priv);
- if (ret) {
- netdev_err(netdev, "phy irq request failed\n");
- destroy_workqueue(priv->an_workqueue);
- return ret;
- }
-
- priv->an_irq_allocated = 1;
- }
-
- /* Set initial mode - call the mode setting routines
- * directly to insure we are properly configured
- */
- if (amd_xgbe_phy_use_xgmii_mode(phydev))
- ret = amd_xgbe_phy_xgmii_mode(phydev);
- else if (amd_xgbe_phy_use_gmii_mode(phydev))
- ret = amd_xgbe_phy_gmii_mode(phydev);
- else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
- ret = amd_xgbe_phy_gmii_2500_mode(phydev);
- else
- ret = -EINVAL;
- if (ret < 0)
- return ret;
-
- /* Set up advertisement registers based on current settings */
- ret = amd_xgbe_an_init(phydev);
- if (ret)
- return ret;
-
- /* Enable auto-negotiation interrupts */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
-
- return 0;
-}
-
-static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
-{
- int ret;
-
- /* Disable auto-negotiation */
- ret = amd_xgbe_phy_disable_an(phydev);
- if (ret < 0)
- return ret;
-
- /* Validate/Set specified speed */
- switch (phydev->speed) {
- case SPEED_10000:
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
- break;
-
- case SPEED_2500:
- case SPEED_1000:
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
- break;
-
- default:
- ret = -EINVAL;
- }
-
- if (ret < 0)
- return ret;
-
- /* Validate duplex mode */
- if (phydev->duplex != DUPLEX_FULL)
- return -EINVAL;
-
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- return 0;
-}
-
-static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- u32 mmd_mask = phydev->c45_ids.devices_in_package;
- int ret;
-
- if (phydev->autoneg != AUTONEG_ENABLE)
- return amd_xgbe_phy_setup_forced(phydev);
-
- /* Make sure we have the AN MMD present */
- if (!(mmd_mask & MDIO_DEVS_AN))
- return -EINVAL;
-
- /* Disable auto-negotiation interrupt */
- disable_irq(priv->an_irq);
-
- /* Start auto-negotiation in a supported mode */
- if (phydev->advertising & SUPPORTED_10000baseKR_Full)
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
- else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
- (phydev->advertising & SUPPORTED_2500baseX_Full))
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
- else
- ret = -EINVAL;
- if (ret < 0) {
- enable_irq(priv->an_irq);
- return ret;
- }
-
- /* Disable and stop any in progress auto-negotiation */
- ret = amd_xgbe_phy_disable_an(phydev);
- if (ret < 0)
- return ret;
-
- /* Clear any auto-negotitation interrupts */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-
- priv->an_result = AMD_XGBE_AN_READY;
- priv->an_state = AMD_XGBE_AN_READY;
- priv->kr_state = AMD_XGBE_RX_BPA;
- priv->kx_state = AMD_XGBE_RX_BPA;
-
- /* Re-enable auto-negotiation interrupt */
- enable_irq(priv->an_irq);
-
- /* Set up advertisement registers based on current settings */
- ret = amd_xgbe_an_init(phydev);
- if (ret)
- return ret;
-
- /* Enable and start auto-negotiation */
- return amd_xgbe_phy_restart_an(phydev);
-}
-
-static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- mutex_lock(&priv->an_mutex);
-
- ret = __amd_xgbe_phy_config_aneg(phydev);
-
- mutex_unlock(&priv->an_mutex);
-
- return ret;
-}
-
-static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
-
- return (priv->an_result == AMD_XGBE_AN_COMPLETE);
-}
-
-static int amd_xgbe_phy_update_link(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* If we're doing auto-negotiation, don't report link down */
- if (priv->an_state != AMD_XGBE_AN_READY) {
- phydev->link = 1;
- return 0;
- }
-
- /* Link status is latched low, so read once to clear
- * and then read again to get current state
- */
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
- if (ret < 0)
- return ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
- if (ret < 0)
- return ret;
-
- phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
-
- return 0;
-}
-
-static int amd_xgbe_phy_read_status(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- u32 mmd_mask = phydev->c45_ids.devices_in_package;
- int ret, ad_ret, lp_ret;
-
- ret = amd_xgbe_phy_update_link(phydev);
- if (ret)
- return ret;
-
- if ((phydev->autoneg == AUTONEG_ENABLE) &&
- !priv->parallel_detect) {
- if (!(mmd_mask & MDIO_DEVS_AN))
- return -EINVAL;
-
- if (!amd_xgbe_phy_aneg_done(phydev))
- return 0;
-
- /* Compare Advertisement and Link Partner register 1 */
- ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- if (ad_ret < 0)
- return ad_ret;
- lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
- if (lp_ret < 0)
- return lp_ret;
-
- ad_ret &= lp_ret;
- phydev->pause = (ad_ret & 0x400) ? 1 : 0;
- phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;
-
- /* Compare Advertisement and Link Partner register 2 */
- ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE + 1);
- if (ad_ret < 0)
- return ad_ret;
- lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
- if (lp_ret < 0)
- return lp_ret;
-
- ad_ret &= lp_ret;
- if (ad_ret & 0x80) {
- phydev->speed = SPEED_10000;
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
- if (ret)
- return ret;
- } else {
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- phydev->speed = SPEED_1000;
- break;
-
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
- phydev->speed = SPEED_2500;
- break;
- }
-
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
- if (ret)
- return ret;
- }
-
- phydev->duplex = DUPLEX_FULL;
- } else {
- if (amd_xgbe_phy_in_kr_mode(phydev)) {
- phydev->speed = SPEED_10000;
- } else {
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- phydev->speed = SPEED_1000;
- break;
-
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
- phydev->speed = SPEED_2500;
- break;
- }
- }
- phydev->duplex = DUPLEX_FULL;
- phydev->pause = 0;
- phydev->asym_pause = 0;
- }
-
- return 0;
-}
-
-static int amd_xgbe_phy_suspend(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- mutex_lock(&phydev->lock);
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- goto unlock;
-
- priv->lpm_ctrl = ret;
-
- ret |= MDIO_CTRL1_LPOWER;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- ret = 0;
-
-unlock:
- mutex_unlock(&phydev->lock);
-
- return ret;
-}
-
-static int amd_xgbe_phy_resume(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
-
- mutex_lock(&phydev->lock);
-
- priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
-
- mutex_unlock(&phydev->lock);
-
- return 0;
-}
-
-static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
- unsigned int type)
-{
- unsigned int count;
- int i;
-
- for (i = 0, count = 0; i < pdev->num_resources; i++) {
- struct resource *r = &pdev->resource[i];
-
- if (type == resource_type(r))
- count++;
- }
-
- return count;
-}
-
-static int amd_xgbe_phy_probe(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv;
- struct platform_device *phy_pdev;
- struct device *dev, *phy_dev;
- unsigned int phy_resnum, phy_irqnum;
- int ret;
-
- if (!phydev->bus || !phydev->bus->parent)
- return -EINVAL;
-
- dev = phydev->bus->parent;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->pdev = to_platform_device(dev);
- priv->adev = ACPI_COMPANION(dev);
- priv->dev = dev;
- priv->phydev = phydev;
- mutex_init(&priv->an_mutex);
- INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
- INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
-
- if (!priv->adev || acpi_disabled) {
- struct device_node *bus_node;
- struct device_node *phy_node;
-
- bus_node = priv->dev->of_node;
- phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
- if (!phy_node) {
- dev_err(dev, "unable to parse phy-handle\n");
- ret = -EINVAL;
- goto err_priv;
- }
-
- phy_pdev = of_find_device_by_node(phy_node);
- of_node_put(phy_node);
-
- if (!phy_pdev) {
- dev_err(dev, "unable to obtain phy device\n");
- ret = -EINVAL;
- goto err_priv;
- }
-
- phy_resnum = 0;
- phy_irqnum = 0;
- } else {
- /* In ACPI, the XGBE and PHY resources are grouped
- * together, with the PHY resources at the end
- */
- phy_pdev = priv->pdev;
- phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
- IORESOURCE_MEM) - 3;
- phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
- IORESOURCE_IRQ) - 1;
- }
- phy_dev = &phy_pdev->dev;
-
- /* Get the device mmio areas */
- priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
- phy_resnum++);
- priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
- if (IS_ERR(priv->rxtx_regs)) {
- dev_err(dev, "rxtx ioremap failed\n");
- ret = PTR_ERR(priv->rxtx_regs);
- goto err_put;
- }
-
- priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
- phy_resnum++);
- priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
- if (IS_ERR(priv->sir0_regs)) {
- dev_err(dev, "sir0 ioremap failed\n");
- ret = PTR_ERR(priv->sir0_regs);
- goto err_rxtx;
- }
-
- priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
- phy_resnum++);
- priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
- if (IS_ERR(priv->sir1_regs)) {
- dev_err(dev, "sir1 ioremap failed\n");
- ret = PTR_ERR(priv->sir1_regs);
- goto err_sir0;
- }
-
- /* Get the auto-negotiation interrupt */
- ret = platform_get_irq(phy_pdev, phy_irqnum);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq failed\n");
- goto err_sir1;
- }
- priv->an_irq = ret;
-
- /* Get the device speed set property */
- ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
- &priv->speed_set);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_SPEEDSET_PROPERTY);
- goto err_sir1;
- }
-
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
- break;
- default:
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_SPEEDSET_PROPERTY);
- ret = -EINVAL;
- goto err_sir1;
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_BLWC_PROPERTY,
- priv->serdes_blwc,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_BLWC_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
- sizeof(priv->serdes_blwc));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_CDR_RATE_PROPERTY,
- priv->serdes_cdr_rate,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_CDR_RATE_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
- sizeof(priv->serdes_cdr_rate));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_PQ_SKEW_PROPERTY,
- priv->serdes_pq_skew,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_PQ_SKEW_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
- sizeof(priv->serdes_pq_skew));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_TX_AMP_PROPERTY,
- priv->serdes_tx_amp,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_TX_AMP_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
- sizeof(priv->serdes_tx_amp));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_DFE_CFG_PROPERTY,
- priv->serdes_dfe_tap_cfg,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_DFE_CFG_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_dfe_tap_cfg,
- amd_xgbe_phy_serdes_dfe_tap_cfg,
- sizeof(priv->serdes_dfe_tap_cfg));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_DFE_ENA_PROPERTY,
- priv->serdes_dfe_tap_ena,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_DFE_ENA_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_dfe_tap_ena,
- amd_xgbe_phy_serdes_dfe_tap_ena,
- sizeof(priv->serdes_dfe_tap_ena));
- }
-
- /* Initialize supported features */
- phydev->supported = SUPPORTED_Autoneg;
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->supported |= SUPPORTED_Backplane;
- phydev->supported |= SUPPORTED_10000baseKR_Full;
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- phydev->supported |= SUPPORTED_1000baseKX_Full;
- break;
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
- phydev->supported |= SUPPORTED_2500baseX_Full;
- break;
- }
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
- if (ret < 0)
- return ret;
- priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
- if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
- phydev->supported |= SUPPORTED_10000baseR_FEC;
-
- phydev->advertising = phydev->supported;
-
- phydev->priv = priv;
-
- if (!priv->adev || acpi_disabled)
- platform_device_put(phy_pdev);
-
- return 0;
-
-err_sir1:
- devm_iounmap(dev, priv->sir1_regs);
- devm_release_mem_region(dev, priv->sir1_res->start,
- resource_size(priv->sir1_res));
-
-err_sir0:
- devm_iounmap(dev, priv->sir0_regs);
- devm_release_mem_region(dev, priv->sir0_res->start,
- resource_size(priv->sir0_res));
-
-err_rxtx:
- devm_iounmap(dev, priv->rxtx_regs);
- devm_release_mem_region(dev, priv->rxtx_res->start,
- resource_size(priv->rxtx_res));
-
-err_put:
- if (!priv->adev || acpi_disabled)
- platform_device_put(phy_pdev);
-
-err_priv:
- devm_kfree(dev, priv);
-
- return ret;
-}
-
-static void amd_xgbe_phy_remove(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- struct device *dev = priv->dev;
-
- if (priv->an_irq_allocated) {
- devm_free_irq(dev, priv->an_irq, priv);
-
- flush_workqueue(priv->an_workqueue);
- destroy_workqueue(priv->an_workqueue);
- }
-
- /* Release resources */
- devm_iounmap(dev, priv->sir1_regs);
- devm_release_mem_region(dev, priv->sir1_res->start,
- resource_size(priv->sir1_res));
-
- devm_iounmap(dev, priv->sir0_regs);
- devm_release_mem_region(dev, priv->sir0_res->start,
- resource_size(priv->sir0_res));
-
- devm_iounmap(dev, priv->rxtx_regs);
- devm_release_mem_region(dev, priv->rxtx_res->start,
- resource_size(priv->rxtx_res));
-
- devm_kfree(dev, priv);
-}
-
-static int amd_xgbe_match_phy_device(struct phy_device *phydev)
-{
- return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
-}
-
-static struct phy_driver amd_xgbe_phy_driver[] = {
- {
- .phy_id = XGBE_PHY_ID,
- .phy_id_mask = XGBE_PHY_MASK,
- .name = "AMD XGBE PHY",
- .features = 0,
- .flags = PHY_IS_INTERNAL,
- .probe = amd_xgbe_phy_probe,
- .remove = amd_xgbe_phy_remove,
- .soft_reset = amd_xgbe_phy_soft_reset,
- .config_init = amd_xgbe_phy_config_init,
- .suspend = amd_xgbe_phy_suspend,
- .resume = amd_xgbe_phy_resume,
- .config_aneg = amd_xgbe_phy_config_aneg,
- .aneg_done = amd_xgbe_phy_aneg_done,
- .read_status = amd_xgbe_phy_read_status,
- .match_phy_device = amd_xgbe_match_phy_device,
- .driver = {
- .owner = THIS_MODULE,
- },
- },
-};
-
-module_phy_driver(amd_xgbe_phy_driver);
-
-static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
- { XGBE_PHY_ID, XGBE_PHY_MASK },
- { }
-};
-MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b5dc59de094e..4dea85bfc545 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -136,8 +136,8 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
/* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
- /* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */
- phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061);
+ /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
@@ -167,6 +167,9 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
/* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+ /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
+
/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
new file mode 100644
index 000000000000..c7a12e2e07b7
--- /dev/null
+++ b/drivers/net/phy/dp83867.c
@@ -0,0 +1,239 @@
+/*
+ * Driver for the Texas Instruments DP83867 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#include <dt-bindings/net/ti-dp83867.h>
+
+#define DP83867_PHY_ID 0x2000a231
+#define DP83867_DEVADDR 0x1f
+
+#define MII_DP83867_PHYCTRL 0x10
+#define MII_DP83867_MICR 0x12
+#define MII_DP83867_ISR 0x13
+#define DP83867_CTRL 0x1f
+
+/* Extended Registers */
+#define DP83867_RGMIICTL 0x0032
+#define DP83867_RGMIIDCTL 0x0086
+
+#define DP83867_SW_RESET BIT(15)
+#define DP83867_SW_RESTART BIT(14)
+
+/* MICR Interrupt bits */
+#define MII_DP83867_MICR_AN_ERR_INT_EN BIT(15)
+#define MII_DP83867_MICR_SPEED_CHNG_INT_EN BIT(14)
+#define MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN BIT(13)
+#define MII_DP83867_MICR_PAGE_RXD_INT_EN BIT(12)
+#define MII_DP83867_MICR_AUTONEG_COMP_INT_EN BIT(11)
+#define MII_DP83867_MICR_LINK_STS_CHNG_INT_EN BIT(10)
+#define MII_DP83867_MICR_FALSE_CARRIER_INT_EN BIT(8)
+#define MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN BIT(4)
+#define MII_DP83867_MICR_WOL_INT_EN BIT(3)
+#define MII_DP83867_MICR_XGMII_ERR_INT_EN BIT(2)
+#define MII_DP83867_MICR_POL_CHNG_INT_EN BIT(1)
+#define MII_DP83867_MICR_JABBER_INT_EN BIT(0)
+
+/* RGMIICTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
+#define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+
+/* RGMIIDCTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
+
+struct dp83867_private {
+ int rx_id_delay;
+ int tx_id_delay;
+ int fifo_depth;
+};
+
+static int dp83867_ack_interrupt(struct phy_device *phydev)
+{
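+ /* The interrupt status register is read-to-clear; a successful read
+ * acks all pending PHY interrupts.
+ */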
+ int err = phy_read(phydev, MII_DP83867_ISR);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83867_config_intr(struct phy_device *phydev)
+{
+ int micr_status;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ micr_status = phy_read(phydev, MII_DP83867_MICR);
+ if (micr_status < 0)
+ return micr_status;
+
+ micr_status |=
+ (MII_DP83867_MICR_AN_ERR_INT_EN |
+ MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+ MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
+ MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+ return phy_write(phydev, MII_DP83867_MICR, micr_status);
+ }
+
+ micr_status = 0x0;
+ return phy_write(phydev, MII_DP83867_MICR, micr_status);
+}
+
+#ifdef CONFIG_OF_MDIO
+static int dp83867_of_init(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867 = phydev->priv;
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+ int ret;
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (!of_node)
+ return -ENODEV;
+
+ ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
+ &dp83867->rx_id_delay);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
+ &dp83867->tx_id_delay);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "ti,fifo-depth",
+ &dp83867->fifo_depth);
+ if (ret)
+ return ret;
+
+ return 0;
+}
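+
+/* An illustrative (not normative) device tree node consumed by the parser
+ * above; the delay and FIFO constants come from dt-bindings/net/ti-dp83867.h:
+ *
+ * ethernet-phy@0 {
+ * reg = <0>;
+ * ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ * ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>;
+ * ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ * };
+ */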
+#else
+static int dp83867_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83867_config_init(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867;
+ int ret;
+ u16 val, delay;
+
+ if (!phydev->priv) {
+ dp83867 = devm_kzalloc(&phydev->dev, sizeof(*dp83867),
+ GFP_KERNEL);
+ if (!dp83867)
+ return -ENOMEM;
+
+ phydev->priv = dp83867;
+ ret = dp83867_of_init(phydev);
+ if (ret)
+ return ret;
+ } else {
+ dp83867 = (struct dp83867_private *)phydev->priv;
+ }
+
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL,
+ (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ if (ret)
+ return ret;
+ }
+
+ if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
+ (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
+ val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
+ DP83867_DEVADDR, phydev->addr);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ val |= DP83867_RGMII_TX_CLK_DELAY_EN;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ val |= DP83867_RGMII_RX_CLK_DELAY_EN;
+
+ phy_write_mmd_indirect(phydev, DP83867_RGMIICTL,
+ DP83867_DEVADDR, phydev->addr, val);
+
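+ /* RGMIIDCTL packs both internal delays: RX delay in the low nibble,
+ * TX delay in bits 7:4 (DP83867_RGMII_TX_CLK_DELAY_SHIFT).
+ */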
+ delay = (dp83867->rx_id_delay |
+ (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+
+ phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL,
+ DP83867_DEVADDR, phydev->addr, delay);
+ }
+
+ return 0;
+}
+
+static int dp83867_phy_reset(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+ if (err < 0)
+ return err;
+
+ return dp83867_config_init(phydev);
+}
+
+static struct phy_driver dp83867_driver[] = {
+ {
+ .phy_id = DP83867_PHY_ID,
+ .phy_id_mask = 0xfffffff0,
+ .name = "TI DP83867",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+
+ .config_init = dp83867_config_init,
+ .soft_reset = dp83867_phy_reset,
+
+ /* IRQ related */
+ .ack_interrupt = dp83867_ack_interrupt,
+ .config_intr = dp83867_config_intr,
+
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+
+ .driver = {.owner = THIS_MODULE,}
+ },
+};
+module_phy_driver(dp83867_driver);
+
+static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+ { DP83867_PHY_ID, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, dp83867_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 8644f039d922..0dbc445a5fa0 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -139,10 +139,7 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if (phy_interface_is_rgmii(phydev)) {
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
if (c < 0)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 1b1698f98818..f721444c2b0a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -317,10 +317,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if (phy_interface_is_rgmii(phydev)) {
mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
MII_88E1121_PHY_MSCR_DELAY_MASK;
@@ -469,10 +466,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
int err;
int temp;
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if (phy_interface_is_rgmii(phydev)) {
temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
if (temp < 0)
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index daec9b05d168..61a543c788cc 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -165,8 +165,11 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
ctrl->ops->set_mdio_dir(ctrl, 0);
- /* check the turnaround bit: the PHY should be driving it to zero */
- if (mdiobb_get_bit(ctrl) != 0) {
+ /* Check the turnaround bit: the PHY should be driving it to zero.
+ * If this PHY is listed in phy_ignore_ta_mask as having a broken TA
+ * bit, skip the check.
+ */
+ if (mdiobb_get_bit(ctrl) != 0 &&
+ !(bus->phy_ignore_ta_mask & (1 << phy))) {
/* PHY didn't drive TA low -- flush any bits it
* may be trying to send.
*/
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 53d18150f4e2..7dc21e56a7aa 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -158,6 +158,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
new_bus->name = "GPIO Bitbanged MDIO",
new_bus->phy_mask = pdata->phy_mask;
+ new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
new_bus->irq = pdata->irqs;
new_bus->parent = dev;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ebdc357c5131..499185eaf413 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -288,9 +288,10 @@ static int kszphy_config_init(struct phy_device *phydev)
}
static int ksz9021_load_values_from_of(struct phy_device *phydev,
- struct device_node *of_node, u16 reg,
- char *field1, char *field2,
- char *field3, char *field4)
+ const struct device_node *of_node,
+ u16 reg,
+ const char *field1, const char *field2,
+ const char *field3, const char *field4)
{
int val1 = -1;
int val2 = -2;
@@ -336,8 +337,8 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
static int ksz9021_config_init(struct phy_device *phydev)
{
- struct device *dev = &phydev->dev;
- struct device_node *of_node = dev->of_node;
+ const struct device *dev = &phydev->dev;
+ const struct device_node *of_node = dev->of_node;
if (!of_node && dev->parent->of_node)
of_node = dev->parent->of_node;
@@ -365,6 +366,11 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define KSZ9031_PS_TO_REG 60
/* Extended registers */
+/* MMD Address 0x0 */
+#define MII_KSZ9031RN_FLP_BURST_TX_LO 3
+#define MII_KSZ9031RN_FLP_BURST_TX_HI 4
+
+/* MMD Address 0x2 */
#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
@@ -389,9 +395,9 @@ static int ksz9031_extended_read(struct phy_device *phydev,
}
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
- struct device_node *of_node,
+ const struct device_node *of_node,
u16 reg, size_t field_sz,
- char *field[], u8 numfields)
+ const char *field[], u8 numfields)
{
int val[4] = {-1, -2, -3, -4};
int matches = 0;
@@ -425,20 +431,36 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
}
+static int ksz9031_center_flp_timing(struct phy_device *phydev)
+{
+ int result;
+
+ /* Center KSZ9031RNX FLP timing at 16ms. */
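+ /* ({HI,LO} = 0x0006,0x1A80 -> 0x61A80 = 400000 ticks; 16 ms assuming
+ * 40 ns register units)
+ */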
+ result = ksz9031_extended_write(phydev, OP_DATA, 0,
+ MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+ if (result)
+ return result;
+
+ result = ksz9031_extended_write(phydev, OP_DATA, 0,
+ MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
+
+ if (result)
+ return result;
+
+ return genphy_restart_aneg(phydev);
+}
+
static int ksz9031_config_init(struct phy_device *phydev)
{
- struct device *dev = &phydev->dev;
- struct device_node *of_node = dev->of_node;
- char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
- char *rx_data_skews[4] = {
+ const struct device *dev = &phydev->dev;
+ const struct device_node *of_node = dev->of_node;
+ static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+ static const char *rx_data_skews[4] = {
"rxd0-skew-ps", "rxd1-skew-ps",
"rxd2-skew-ps", "rxd3-skew-ps"
};
- char *tx_data_skews[4] = {
+ static const char *tx_data_skews[4] = {
"txd0-skew-ps", "txd1-skew-ps",
"txd2-skew-ps", "txd3-skew-ps"
};
- char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+ static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
if (!of_node && dev->parent->of_node)
of_node = dev->parent->of_node;
@@ -460,7 +482,8 @@ static int ksz9031_config_init(struct phy_device *phydev)
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
tx_data_skews, 4);
}
- return 0;
+
+ return ksz9031_center_flp_timing(phydev);
}
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
@@ -519,7 +542,7 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
- struct device_node *np = phydev->dev.of_node;
+ const struct device_node *np = phydev->dev.of_node;
struct kszphy_priv *priv;
struct clk *clk;
int ret;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 47cd578052fc..b2197b506acb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -58,6 +58,31 @@ static const char *phy_speed_to_str(int speed)
}
}
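+/* Expands to a switch case that returns the state name as a string,
+ * e.g. PHY_STATE_STR(RUNNING) -> case PHY_RUNNING: return "RUNNING";
+ */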
+#define PHY_STATE_STR(_state) \
+ case PHY_##_state: \
+ return __stringify(_state); \
+
+static const char *phy_state_to_str(enum phy_state st)
+{
+ switch (st) {
+ PHY_STATE_STR(DOWN)
+ PHY_STATE_STR(STARTING)
+ PHY_STATE_STR(READY)
+ PHY_STATE_STR(PENDING)
+ PHY_STATE_STR(UP)
+ PHY_STATE_STR(AN)
+ PHY_STATE_STR(RUNNING)
+ PHY_STATE_STR(NOLINK)
+ PHY_STATE_STR(FORCING)
+ PHY_STATE_STR(CHANGELINK)
+ PHY_STATE_STR(HALTED)
+ PHY_STATE_STR(RESUMING)
+ }
+
+ return NULL;
+}
+
+
/**
* phy_print_status - Convenience function to print out the current phy status
* @phydev: the phy_device struct
@@ -784,10 +809,13 @@ void phy_state_machine(struct work_struct *work)
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
bool needs_aneg = false, do_suspend = false;
+ enum phy_state old_state;
int err = 0;
mutex_lock(&phydev->lock);
+ old_state = phydev->state;
+
if (phydev->drv->link_change_notify)
phydev->drv->link_change_notify(phydev);
@@ -952,6 +980,9 @@ void phy_state_machine(struct work_struct *work)
if (err < 0)
phy_error(phydev);
+ dev_dbg(&phydev->dev, "PHY state change %s -> %s\n",
+ phy_state_to_str(old_state), phy_state_to_str(phydev->state));
+
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
PHY_STATE_TIME * HZ);
}
@@ -1062,8 +1093,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if ((phydev->duplex == DUPLEX_FULL) &&
((phydev->interface == PHY_INTERFACE_MODE_MII) ||
(phydev->interface == PHY_INTERFACE_MODE_GMII) ||
- (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
- phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
+ phy_interface_is_rgmii(phydev) ||
phy_is_internal(phydev))) {
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index b62a5e3a1c65..3837ae344f63 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -550,11 +550,11 @@ static struct proto pppoe_sk_proto __read_mostly = {
* Initialize a new struct sock.
*
**********************************************************************/
-static int pppoe_create(struct net *net, struct socket *sock)
+static int pppoe_create(struct net *net, struct socket *sock, int kern)
{
struct sock *sk;
- sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto);
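+ /* kern marks kernel-internal sockets, which must not pin the netns */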
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, kern);
if (!sk)
return -ENOMEM;
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index 2940e9fe351b..0e1b30622477 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -118,7 +118,7 @@ static int pppox_create(struct net *net, struct socket *sock, int protocol,
!try_module_get(pppox_protos[protocol]->owner))
goto out;
- rc = pppox_protos[protocol]->create(net, sock);
+ rc = pppox_protos[protocol]->create(net, sock, kern);
module_put(pppox_protos[protocol]->owner);
out:
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index e3bfbd4d0136..14839bc0aaf5 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -561,14 +561,14 @@ static void pptp_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
}
-static int pptp_create(struct net *net, struct socket *sock)
+static int pptp_create(struct net *net, struct socket *sock, int kern)
{
int error = -ENOMEM;
struct sock *sk;
struct pppox_sock *po;
struct pptp_opt *opt;
- sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
if (!sk)
goto out;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6928448f6b7f..daa054b3ff03 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1924,7 +1924,7 @@ static netdev_features_t team_fix_features(struct net_device *dev,
struct team *team = netdev_priv(dev);
netdev_features_t mask;
- mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+ mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
@@ -1977,8 +1977,12 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
.ndo_change_carrier = team_change_carrier,
- .ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
- .ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_features_check = passthru_features_check,
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e470ae59d405..1a1c4f7b3ec5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -146,7 +146,6 @@ struct tun_file {
struct socket socket;
struct socket_wq wq;
struct tun_struct __rcu *tun;
- struct net *net;
struct fasync_struct *fasync;
/* only used for fasnyc */
unsigned int flags;
@@ -493,10 +492,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
-
- BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
- &tfile->socket.flags));
- sk_release_kernel(&tfile->sk);
+ sock_put(&tfile->sk);
}
}
@@ -1492,18 +1488,10 @@ out:
return ret;
}
-static int tun_release(struct socket *sock)
-{
- if (sock->sk)
- sock_put(sock->sk);
- return 0;
-}
-
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
.sendmsg = tun_sendmsg,
.recvmsg = tun_recvmsg,
- .release = tun_release,
};
static struct proto tun_proto = {
@@ -1865,7 +1853,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (cmd == TUNSETIFF && !tun) {
ifr.ifr_name[IFNAMSIZ-1] = '\0';
- ret = tun_set_iff(tfile->net, file, &ifr);
+ ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
if (ret)
goto unlock;
@@ -2154,16 +2142,16 @@ out:
static int tun_chr_open(struct inode *inode, struct file * file)
{
+ struct net *net = current->nsproxy->net_ns;
struct tun_file *tfile;
DBG1(KERN_INFO, "tunX: tun_chr_open\n");
- tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
- &tun_proto);
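+ /* kern=0: the socket itself now holds the netns reference that
+ * tfile->net used to carry.
+ */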
+ tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+ &tun_proto, 0);
if (!tfile)
return -ENOMEM;
RCU_INIT_POINTER(tfile->tun, NULL);
- tfile->net = get_net(current->nsproxy->net_ns);
tfile->flags = 0;
tfile->ifindex = 0;
@@ -2174,13 +2162,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.ops = &tun_socket_ops;
sock_init_data(&tfile->socket, &tfile->sk);
- sk_change_net(&tfile->sk, tfile->net);
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;
file->private_data = tfile;
- set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
INIT_LIST_HEAD(&tfile->next);
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
@@ -2191,10 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
static int tun_chr_close(struct inode *inode, struct file *file)
{
struct tun_file *tfile = file->private_data;
- struct net *net = tfile->net;
tun_detach(tfile, true);
- put_net(net);
return 0;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 21a0fbf1ed94..34c519eb1db5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -336,7 +336,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
nla_put_s32(skb, NDA_LINK_NETNSID,
- peernet2id(dev_net(vxlan->dev), vxlan->net)))
+ peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
goto nla_put_failure;
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
@@ -1921,6 +1921,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = rdst->remote_ifindex;
fl4.flowi4_tos = RT_TOS(tos);
+ fl4.flowi4_mark = skb->mark;
+ fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = dst->sin.sin_addr.s_addr;
fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
@@ -1981,6 +1983,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
fl6.flowi6_oif = rdst->remote_ifindex;
fl6.daddr = dst->sin6.sin6_addr;
fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+ fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
@@ -2128,9 +2131,10 @@ static void vxlan_cleanup(unsigned long arg)
if (!netif_running(vxlan->dev))
return;
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
+
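+ /* Take the hash lock per bucket so the aging scan doesn't hold it
+ * across the whole table.
+ */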
+ spin_lock_bh(&vxlan->hash_lock);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@ -2149,8 +2153,8 @@ static void vxlan_cleanup(unsigned long arg)
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
+ spin_unlock_bh(&vxlan->hash_lock);
}
- spin_unlock_bh(&vxlan->hash_lock);
mod_timer(&vxlan->age_timer, next_timer);
}
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index bcfa01add7cc..7193b7304fdd 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -517,7 +517,7 @@ static int cosa_probe(int base, int irq, int dma)
*/
set_current_state(TASK_INTERRUPTIBLE);
cosa_putstatus(cosa, SR_TX_INT_ENA);
- schedule_timeout(30);
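+ /* 30 jiffies assumed HZ=100; an explicit 300 ms is HZ-independent */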
+ schedule_timeout(msecs_to_jiffies(300));
irq = probe_irq_off(irqs);
/* Disable all IRQs from the card */
cosa_putstatus(cosa, 0);
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 08223569cebd..7a72407208b1 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -551,7 +551,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
msg, i);
goto done;
}
- schedule_timeout_uninterruptible(10);
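+ /* 10 jiffies assumed HZ=100; an explicit 100 ms is HZ-independent */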
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
rmb();
} while (++i > 0);
netdev_err(dev, "%s timeout\n", msg);
@@ -596,7 +596,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
(dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
break;
smp_rmb();
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
} while (++i > 0);
return (i >= 0 ) ? i : -EAGAIN;
@@ -1033,7 +1033,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
/* Flush posted writes */
readl(ioaddr + GSTAR);
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
for (i = 0; i < 16; i++)
pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
@@ -1046,7 +1046,6 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
static int dscc4_open(struct net_device *dev)
{
struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
- struct dscc4_pci_priv *ppriv;
int ret = -EAGAIN;
if ((dscc4_loopback_check(dpriv) < 0))
@@ -1055,8 +1054,6 @@ static int dscc4_open(struct net_device *dev)
if ((ret = hdlc_open(dev)))
goto err;
- ppriv = dpriv->pci_priv;
-
/*
* Due to various bugs, there is no way to reliably reset a
* specific port (manufacturer's dependent special PCI #RST wiring
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 16604bdf5197..a63ab2e83105 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -277,6 +277,7 @@ source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
source "drivers/net/wireless/p54/Kconfig"
source "drivers/net/wireless/rt2x00/Kconfig"
+source "drivers/net/wireless/mediatek/Kconfig"
source "drivers/net/wireless/rtlwifi/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0c8891686718..6b9e729dd8ac 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -45,6 +45,8 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi/
obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
+obj-$(CONFIG_WL_MEDIATEK) += mediatek/
+
obj-$(CONFIG_P54_COMMON) += p54/
obj-$(CONFIG_ATH_CARDS) += ath/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f07a61899545..8c283fcd843d 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1098,14 +1098,18 @@ static void adm8211_hw_init(struct ieee80211_hw *dev)
pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);
switch (cline) {
- case 0x8: reg |= (0x1 << 14);
- break;
- case 0x16: reg |= (0x2 << 14);
- break;
- case 0x32: reg |= (0x3 << 14);
- break;
- default: reg |= (0x0 << 14);
- break;
+ case 0x8:
+ reg |= (0x1 << 14);
+ break;
+ case 0x10:
+ reg |= (0x2 << 14);
+ break;
+ case 0x20:
+ reg |= (0x3 << 14);
+ break;
+ default:
+ reg |= (0x0 << 14);
+ break;
}
}
@@ -1353,12 +1357,7 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
new_flags = 0;
- if (*total_flags & FIF_PROMISC_IN_BSS) {
- new_flags |= FIF_PROMISC_IN_BSS;
- priv->nar |= ADM8211_NAR_PR;
- priv->nar &= ~ADM8211_NAR_MM;
- mc_filter[1] = mc_filter[0] = ~0;
- } else if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
+ if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
new_flags |= FIF_ALLMULTI;
priv->nar &= ~ADM8211_NAR_PR;
priv->nar |= ADM8211_NAR_MM;
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 55090a38ac95..ae03271f878e 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -447,7 +447,7 @@ struct at76_priv {
int mac80211_registered;
};
-#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS
+#define AT76_SUPPORTED_FILTERS 0
#define SCAN_POLL_INTERVAL (HZ / 4)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 5147ebe4cd05..14937cbeca56 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1319,8 +1319,7 @@ out_unlock:
}
-#define AR5523_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+#define AR5523_SUPPORTED_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_OTHER_BSS)
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 7e9481099a8e..65ef483ebf50 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -251,6 +251,7 @@ void ath_printk(const char *level, const struct ath_common *common,
* @ATH_DBG_DFS: radar detection
* @ATH_DBG_WOW: Wake on Wireless
* @ATH_DBG_DYNACK: dynack handling
+ * @ATH_DBG_SPECTRAL_SCAN: FFT spectral scan
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -280,6 +281,7 @@ enum ATH_DEBUG {
ATH_DBG_WOW = 0x00020000,
ATH_DBG_CHAN_CTX = 0x00040000,
ATH_DBG_DYNACK = 0x00080000,
+ ATH_DBG_SPECTRAL_SCAN = 0x00100000,
ATH_DBG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index f4dbb3e93bf8..9729e6941635 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -10,13 +10,15 @@ ath10k_core-y += mac.o \
wmi.o \
wmi-tlv.o \
bmi.o \
- hw.o
+ hw.o \
+ p2p.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
+ath10k_core-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index c0e454bb6a8d..bcccae19325d 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -387,7 +387,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
- if (!skip_otp && result != 0) {
+ if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+ ar->fw_features)) &&
+ result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
@@ -482,31 +484,79 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
return 0;
}
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
{
- int ret = 0;
+ char filename[100];
- if (ar->hw_params.fw.fw == NULL) {
- ath10k_err(ar, "firmware file not defined\n");
- return -EINVAL;
- }
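+ /* e.g. "board-pci-<spec_board_id>.bin" for a device on the PCI bus */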
+ scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
+
+ ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
+ if (IS_ERR(ar->board))
+ return PTR_ERR(ar->board);
+
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+ ar->spec_board_loaded = true;
- if (ar->hw_params.fw.board == NULL) {
- ath10k_err(ar, "board data file not defined");
+ return 0;
+}
+
+static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
+{
+ if (!ar->hw_params.fw.board) {
+ ath10k_err(ar, "failed to find board file fw entry\n");
return -EINVAL;
}
ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.board);
- if (IS_ERR(ar->board)) {
- ret = PTR_ERR(ar->board);
- ath10k_err(ar, "could not fetch board data (%d)\n", ret);
- goto err;
- }
+ if (IS_ERR(ar->board))
+ return PTR_ERR(ar->board);
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
+ ar->spec_board_loaded = false;
+
+ return 0;
+}
+
+static int ath10k_core_fetch_board_file(struct ath10k *ar)
+{
+ int ret;
+
+ if (strlen(ar->spec_board_id) > 0) {
+ ret = ath10k_core_fetch_spec_board_file(ar);
+ if (ret) {
+ ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
+ ret);
+ goto generic;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
+ ar->spec_board_id);
+ return 0;
+ }
+
+generic:
+ ret = ath10k_core_fetch_generic_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+{
+ int ret = 0;
+
+ if (ar->hw_params.fw.fw == NULL) {
+ ath10k_err(ar, "firmware file not defined\n");
+ return -EINVAL;
+ }
ar->firmware = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
@@ -675,6 +725,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
ar->wmi.op_version);
break;
+ case ATH10K_FW_IE_HTT_OP_VERSION:
+ if (ie_len != sizeof(u32))
+ break;
+
+ version = (__le32 *)data;
+
+ ar->htt.op_version = le32_to_cpup(version);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
+ ar->htt.op_version);
+ break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
@@ -695,27 +756,6 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
goto err;
}
- /* now fetch the board file */
- if (ar->hw_params.fw.board == NULL) {
- ath10k_err(ar, "board data file not defined");
- ret = -EINVAL;
- goto err;
- }
-
- ar->board = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->board)) {
- ret = PTR_ERR(ar->board);
- ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
- ar->hw_params.fw.dir, ar->hw_params.fw.board,
- ret);
- goto err;
- }
-
- ar->board_data = ar->board->data;
- ar->board_len = ar->board->size;
-
return 0;
err:
@@ -730,6 +770,19 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
/* calibration file is optional, don't check for any errors */
ath10k_fetch_cal_file(ar);
+ ret = ath10k_core_fetch_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+ return ret;
+ }
+
+ ar->fw_api = 5;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+ if (ret == 0)
+ goto success;
+
ar->fw_api = 4;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
@@ -958,6 +1011,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_vdevs = TARGET_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+ WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -966,12 +1021,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_10X_NUM_STATIONS;
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->max_num_peers = TARGET_TLV_NUM_PEERS;
ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+ ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
+ ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+ WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -979,6 +1039,29 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return -EINVAL;
}
+ /* Backwards compatibility for firmwares without
+ * ATH10K_FW_IE_HTT_OP_VERSION.
+ */
+ if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+ switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -1080,9 +1163,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_wmi_wait_for_service_ready(ar);
- if (status <= 0) {
+ if (status) {
ath10k_warn(ar, "wmi service ready event not received");
- status = -ETIMEDOUT;
goto err_hif_stop;
}
}
@@ -1098,9 +1180,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
}
status = ath10k_wmi_wait_for_unified_ready(ar);
- if (status <= 0) {
+ if (status) {
ath10k_err(ar, "wmi unified ready event not received\n");
- status = -ETIMEDOUT;
goto err_hif_stop;
}
@@ -1151,6 +1232,7 @@ EXPORT_SYMBOL(ath10k_core_start);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
{
int ret;
+ unsigned long time_left;
reinit_completion(&ar->target_suspend);
@@ -1160,9 +1242,9 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
return ret;
}
- ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+ time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
- if (ret == 0) {
+ if (!time_left) {
ath10k_warn(ar, "suspend timed out - target pause event never came\n");
return -ETIMEDOUT;
}
@@ -1386,6 +1468,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->target_suspend);
+ init_completion(&ar->wow.wakeup_completed);
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index f65310c3ba5f..70fcdc9c2758 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -35,6 +35,7 @@
#include "../dfs_pattern_detector.h"
#include "spectral.h"
#include "thermal.h"
+#include "wow.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -43,15 +44,16 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_NUM_CHANS 38
+#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_NUM_CHANS 39
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
#define ATH10K_MAX_NUM_MGMT_PENDING 128
-/* number of failed packets */
-#define ATH10K_KICKOUT_THRESHOLD 50
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
/*
* Use insanely high numbers to make sure that the firmware implementation
@@ -82,6 +84,8 @@ struct ath10k_skb_cb {
dma_addr_t paddr;
u8 eid;
u8 vdev_id;
+ enum ath10k_hw_txrx_mode txmode;
+ bool is_protected;
struct {
u8 tid;
@@ -280,6 +284,15 @@ struct ath10k_sta {
#endif
};
+struct ath10k_chanctx {
+ /* Used to store a copy of chanctx_conf to avoid inconsistencies. Ideally
+ * mac80211 should allow some sort of explicit locking to guarantee
+ * that the publicly available chanctx_conf can be accessed safely at
+ * all times.
+ */
+ struct ieee80211_chanctx_conf conf;
+};
+
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
enum ath10k_beacon_state {
@@ -301,6 +314,7 @@ struct ath10k_vif {
enum ath10k_beacon_state beacon_state;
void *beacon_buf;
dma_addr_t beacon_paddr;
+ unsigned long tx_paused; /* arbitrary values defined by target */
struct ath10k *ar;
struct ieee80211_vif *vif;
@@ -334,13 +348,13 @@ struct ath10k_vif {
} ap;
} u;
- u8 fixed_rate;
- u8 fixed_nss;
- u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
int txpower;
struct wmi_wmm_params_all_arg wmm_params;
+ struct work_struct ap_csa_work;
+ struct delayed_work connection_loss_work;
+ struct cfg80211_bitrate_mask bitrate_mask;
};
struct ath10k_vif_iter {
@@ -440,6 +454,20 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+ /* Some firmware revisions have an incomplete WoWLAN implementation
+ * despite WMI service bit being advertised. This feature flag is used
+ * to distinguish whether WoWLAN is really supported or not.
+ */
+ ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
+
+ /* Don't trust error code from otp.bin */
+ ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+
+ /* Some firmware revisions pad the 4th hw address to a 4-byte boundary,
+ * making it 8 bytes long in Native Wifi Rx decap.
+ */
+ ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -498,6 +526,11 @@ static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
return "unknown";
}
+enum ath10k_tx_pause_reason {
+ ATH10K_TX_PAUSE_Q_FULL,
+ ATH10K_TX_PAUSE_MAX,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -511,12 +544,15 @@ struct ath10k {
u32 fw_version_minor;
u16 fw_version_release;
u16 fw_version_build;
+ u32 fw_stats_req_mask;
u32 phy_capability;
u32 hw_min_tx_power;
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
+ /* protected by conf_mutex */
+ bool ani_enabled;
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
@@ -565,6 +601,9 @@ struct ath10k {
const struct firmware *cal_file;
+ char spec_board_id[100];
+ bool spec_board_loaded;
+
int fw_api;
enum ath10k_cal_mode cal_mode;
@@ -593,6 +632,7 @@ struct ath10k {
struct cfg80211_chan_def chandef;
unsigned long long free_vdev_map;
+ struct ath10k_vif *monitor_arvif;
bool monitor;
int monitor_vdev_id;
bool monitor_started;
@@ -633,6 +673,7 @@ struct ath10k {
int max_num_peers;
int max_num_stations;
int max_num_vdevs;
+ int max_num_tdls_vdevs;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
@@ -655,6 +696,8 @@ struct ath10k {
struct dfs_pattern_detector *dfs_detector;
+ unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
+
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
@@ -686,6 +729,7 @@ struct ath10k {
} stats;
struct ath10k_thermal thermal;
+ struct ath10k_wow wow;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 301081db1ef6..8fa606a9c4dd 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -124,10 +124,14 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
- ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+ ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
+ (strlen(ar->spec_board_id) > 0 ? ", " : ""),
+ ar->spec_board_id,
+ (strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
+ ? " fallback" : ""),
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
@@ -380,12 +384,12 @@ unlock:
static int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
- unsigned long timeout;
+ unsigned long timeout, time_left;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- timeout = jiffies + msecs_to_jiffies(1*HZ);
+ timeout = jiffies + msecs_to_jiffies(1 * HZ);
ath10k_debug_fw_stats_reset(ar);
@@ -395,18 +399,16 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
reinit_completion(&ar->debug.fw_stats_complete);
- ret = ath10k_wmi_request_stats(ar,
- WMI_STAT_PDEV |
- WMI_STAT_VDEV |
- WMI_STAT_PEER);
+ ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
if (ret) {
ath10k_warn(ar, "could not request stats (%d)\n", ret);
return ret;
}
- ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
- 1*HZ);
- if (ret == 0)
+ time_left =
+ wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1 * HZ);
+ if (!time_left)
return -ETIMEDOUT;
spin_lock_bh(&ar->data_lock);
@@ -1708,6 +1710,61 @@ static int ath10k_debug_cal_data_release(struct inode *inode,
return 0;
}
+static ssize_t ath10k_write_ani_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ u8 enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->ani_enabled == enable) {
+ ret = count;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
+ enable);
+ if (ret) {
+ ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
+ goto exit;
+ }
+ ar->ani_enabled = enable;
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->ani_enabled);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ani_enable = {
+ .read = ath10k_read_ani_enable,
+ .write = ath10k_write_ani_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
@@ -1991,6 +2048,50 @@ static const struct file_operations fops_pktlog_filter = {
.open = simple_open
};
+static ssize_t ath10k_write_quiet_period(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 period;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &period))
+ return -EINVAL;
+
+ if (period < ATH10K_QUIET_PERIOD_MIN) {
+ ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n",
+ period);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.quiet_period = period;
+ ath10k_thermal_set_throttling(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%u\n",
+ ar->thermal.quiet_period);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_quiet_period = {
+ .read = ath10k_read_quiet_period,
+ .write = ath10k_write_quiet_period,
+ .open = simple_open
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
@@ -2068,6 +2169,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_cal_data);
+ debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_ani_enable);
+
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
@@ -2088,6 +2192,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
+ debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_quiet_period);
+
return 0;
}
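Both new debugfs files take simple text writes. A hypothetical userspace probe of the two knobs; the mount point and phy name are assumptions (debugfs at /sys/kernel/debug, device phy0), not mandated by the patch:

/* Hypothetical exercise of the new debugfs knobs. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define DBG "/sys/kernel/debug/ieee80211/phy0/ath10k/"

int main(void)
{
	char buf[32] = {};
	int fd;

	fd = open(DBG "ani_enable", O_RDWR);
	if (fd < 0)
		return 1;
	write(fd, "0", 1);			/* disable ANI */
	pread(fd, buf, sizeof(buf) - 1, 0);	/* read back "0\n" */
	printf("ani_enable: %s", buf);
	close(fd);

	fd = open(DBG "quiet_period", O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "50", 2);			/* 50 ms, above the 25 ms minimum */
	close(fd);
	return 0;
}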
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index a12b8323f9f1..53bd6a19eab6 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -36,6 +36,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_TESTMODE = 0x00001000,
ATH10K_DBG_WMI_PRINT = 0x00002000,
+ ATH10K_DBG_PCI_PS = 0x00004000,
ATH10K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 2fd9e180272b..85bfa2acb801 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -86,21 +86,6 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
-/* assumes tx_lock is held */
-static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
-{
- struct ath10k *ar = ep->htc->ar;
-
- if (!ep->tx_credit_flow_enabled)
- return false;
- if (ep->tx_credits >= ep->tx_credits_per_max_message)
- return false;
-
- ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
- ep->eid);
- return true;
-}
-
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
@@ -111,13 +96,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
+ hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
-
- if (ath10k_htc_ep_need_credit_update(ep))
- hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
-
spin_unlock_bh(&ep->htc->tx_lock);
}
@@ -414,7 +396,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
switch (__le16_to_cpu(msg->hdr.message_id)) {
- default:
+ case ATH10K_HTC_MSG_READY_ID:
+ case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */
if (completion_done(&htc->ctl_resp)) {
/*
@@ -438,6 +421,10 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
break;
case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
htc->htc_ops.target_send_suspend_complete(ar);
+ break;
+ default:
+ ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
+ break;
}
goto out;
}
@@ -548,6 +535,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
int i, status = 0;
+ unsigned long time_left;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_msg *msg;
@@ -555,9 +543,9 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
u16 credit_count;
u16 credit_size;
- status = wait_for_completion_timeout(&htc->ctl_resp,
- ATH10K_HTC_WAIT_TIMEOUT_HZ);
- if (status == 0) {
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
/* Workaround: In some cases the PCI HIF doesn't
* receive interrupt for the control response message
* even if the buffer was completed. It is suspected
@@ -569,10 +557,11 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1);
- status = wait_for_completion_timeout(&htc->ctl_resp,
- ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
- if (status == 0)
+ if (!time_left)
status = -ETIMEDOUT;
}
@@ -646,6 +635,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
+ unsigned long time_left;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
@@ -701,10 +691,10 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
}
/* wait for response */
- status = wait_for_completion_timeout(&htc->ctl_resp,
- ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
- if (status == 0) {
- ath10k_err(ar, "Service connect timeout: %d\n", status);
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_err(ar, "Service connect timeout\n");
return -ETIMEDOUT;
}
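The time_left conversions in this file (and the matching ones in debug.c and mac.c) all follow from the same fact: wait_for_completion_timeout() returns an unsigned long holding the remaining jiffies, never a negative errno, so storing it in a signed int named status conflated a success value with an error code. The corrected idiom, as a minimal sketch:

/* Canonical wait_for_completion_timeout() pattern: zero means the
 * timeout elapsed; non-zero is the jiffies left when the completion
 * fired. Map timeout to an errno explicitly.
 */
unsigned long time_left;
int status = 0;

time_left = wait_for_completion_timeout(&htc->ctl_resp,
					ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left)
	status = -ETIMEDOUT;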
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 4f59ab923e48..6da6ef26143a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -22,6 +22,86 @@
#include "core.h"
#include "debug.h"
+static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
+ [HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
+ [HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+ [HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+ [HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ [HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+};
+
+static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
+ [HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ [HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ [HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
@@ -66,6 +146,24 @@ int ath10k_htt_init(struct ath10k *ar)
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
+ switch (ar->htt.op_version) {
+ case ATH10K_FW_HTT_OP_VERSION_10_1:
+ ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_TLV:
+ ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAIN:
+ ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAX:
+ case ATH10K_FW_HTT_OP_VERSION_UNSET:
+ WARN_ON(1);
+ return -EINVAL;
+ }
return 0;
}
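The value of the per-version tables is easiest to see on a concrete id collision visible in the enums added to htt.h below: wire id 0x0e means TEST on 10.x firmware but MGMT_TX_COMPL_IND on wmi-tlv firmware. With the tables, the dispatcher first bounds-checks the raw id and then switches on the firmware-agnostic value. A condensed sketch of that flow (the full version is in the htt_rx.c hunk further down):

/* Condensed from the htt_rx.c dispatcher: translate the raw,
 * firmware-specific wire id into the abstract enum before dispatch.
 */
enum htt_t2h_msg_type type;
u8 raw = resp->hdr.msg_type;

if (raw >= ar->htt.t2h_msg_types_max) {
	/* id unknown to this firmware branch - drop, don't guess */
	dev_kfree_skb_any(skb);
	return;
}

type = ar->htt.t2h_msg_types[raw];	/* e.g. 10.x 0x0e -> ..._TYPE_TEST */

switch (type) {
case HTT_T2H_MSG_TYPE_TEST:
	break;
	/* ... remaining abstract ids ... */
}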
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 874bf44ff7a2..7e8a0d835663 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -25,7 +25,8 @@
#include <net/mac80211.h>
#include "htc.h"
+#include "hw.h"
#include "rx_desc.h"
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
@@ -271,35 +273,108 @@ enum htt_mgmt_tx_status {
/*=== target -> host messages ===============================================*/
-enum htt_t2h_msg_type {
- HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
- HTT_T2H_MSG_TYPE_RX_IND = 0x1,
- HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2,
- HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
- HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
- HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
- HTT_T2H_MSG_TYPE_RX_DELBA = 0x6,
- HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
- HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
- HTT_T2H_MSG_TYPE_STATS_CONF = 0x9,
- HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
- HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
- HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
- HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
- HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
- HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
- HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
- HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
- HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+enum htt_main_t2h_msg_type {
+ HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_MAIN_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_MAIN_T2H_NUM_MSGS
+};
+
+enum htt_10x_t2h_msg_type {
+ HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_10X_T2H_MSG_TYPE_TEST = 0xe,
+ HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
+ HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11,
+ HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12,
+ HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
+ /* keep this last */
+ HTT_10X_T2H_NUM_MSGS
+};
+
+enum htt_tlv_t2h_msg_type {
+ HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */
+ HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
/* 0x13 reserved */
- HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+ HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+ HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16,
+ HTT_TLV_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_TLV_T2H_NUM_MSGS
+};
- /* FIXME: Do not depend on this event id. Numbering of this event id is
- * broken across different firmware revisions and HTT version fails to
- * indicate this.
- */
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_RX_IND,
+ HTT_T2H_MSG_TYPE_RX_FLUSH,
+ HTT_T2H_MSG_TYPE_PEER_MAP,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ HTT_T2H_MSG_TYPE_RX_ADDBA,
+ HTT_T2H_MSG_TYPE_RX_DELBA,
+ HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ HTT_T2H_MSG_TYPE_PKTLOG,
+ HTT_T2H_MSG_TYPE_STATS_CONF,
+ HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ HTT_T2H_MSG_TYPE_SEC_IND,
+ HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_RX_PN_IND,
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ HTT_T2H_MSG_TYPE_AGGR_CONF,
+ HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
HTT_T2H_MSG_TYPE_TEST,
-
/* keep this last */
HTT_T2H_NUM_MSGS
};
@@ -1222,6 +1297,7 @@ struct htt_tx_done {
u32 msdu_id;
bool discard;
bool no_ack;
+ bool success;
};
struct htt_peer_map_event {
@@ -1248,6 +1324,10 @@ struct ath10k_htt {
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
+ enum ath10k_fw_htt_op_version op_version;
+
+ const enum htt_t2h_msg_type *t2h_msg_types;
+ u32 t2h_msg_types_max;
struct {
/*
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 01a2b384f358..89eb16b30fc4 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -637,58 +637,21 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return 0;
}
-struct rfc1042_hdr {
- u8 llc_dsap;
- u8 llc_ssap;
- u8 llc_ctrl;
- u8 snap_oui[3];
- __be16 snap_type;
-} __packed;
-
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
__be16 len;
} __packed;
-static const u8 rx_legacy_rate_idx[] = {
- 3, /* 0x00 - 11Mbps */
- 2, /* 0x01 - 5.5Mbps */
- 1, /* 0x02 - 2Mbps */
- 0, /* 0x03 - 1Mbps */
- 3, /* 0x04 - 11Mbps */
- 2, /* 0x05 - 5.5Mbps */
- 1, /* 0x06 - 2Mbps */
- 0, /* 0x07 - 1Mbps */
- 10, /* 0x08 - 48Mbps */
- 8, /* 0x09 - 24Mbps */
- 6, /* 0x0A - 12Mbps */
- 4, /* 0x0B - 6Mbps */
- 11, /* 0x0C - 54Mbps */
- 9, /* 0x0D - 36Mbps */
- 7, /* 0x0E - 18Mbps */
- 5, /* 0x0F - 9Mbps */
-};
-
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
- enum ieee80211_band band;
- u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+ struct ieee80211_supported_band *sband;
+ u8 cck, rate, bw, sgi, mcs, nss;
u8 preamble = 0;
u32 info1, info2, info3;
- /* Band value can't be set as undefined but freq can be 0 - use that to
- * determine whether band is provided.
- *
- * FIXME: Perhaps this can go away if CCK rate reporting is a little
- * reworked?
- */
- if (!status->freq)
- return;
-
- band = status->band;
info1 = __le32_to_cpu(rxd->ppdu_start.info1);
info2 = __le32_to_cpu(rxd->ppdu_start.info2);
info3 = __le32_to_cpu(rxd->ppdu_start.info3);
@@ -697,31 +660,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
switch (preamble) {
case HTT_RX_LEGACY:
+ /* The band is needed to look up the legacy rate index. Band
+ * itself can't be stored as undefined, so use freq == 0 to
+ * detect a missing band instead.
+ */
+ if (!status->freq)
+ return;
+
cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
- rate_idx = 0;
-
- if (rate < 0x08 || rate > 0x0F)
- break;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- if (cck)
- rate &= ~BIT(3);
- rate_idx = rx_legacy_rate_idx[rate];
- break;
- case IEEE80211_BAND_5GHZ:
- rate_idx = rx_legacy_rate_idx[rate];
- /* We are using same rate table registering
- HW - ath10k_rates[]. In case of 5GHz skip
- CCK rates, so -4 here */
- rate_idx -= 4;
- break;
- default:
- break;
- }
+ rate &= ~RX_PPDU_START_RATE_FLAG;
- status->rate_idx = rate_idx;
+ sband = &ar->mac.sbands[status->band];
+ status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
@@ -773,8 +723,87 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
}
}
+static struct ieee80211_channel *
+ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
+{
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+ u16 peer_id;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!rxd)
+ return NULL;
+
+ if (rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
+ return NULL;
+
+ if (!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
+ return NULL;
+
+ peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_PEER_IDX);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer)
+ return NULL;
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (WARN_ON_ONCE(!arvif))
+ return NULL;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return NULL;
+
+ return def.chan;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_id == vdev_id &&
+ ath10k_mac_vif_chan(arvif->vif, &def) == 0)
+ return def.chan;
+ }
+
+ return NULL;
+}
+
+static void
+ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def *def = data;
+
+ *def = conf->def;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_any_channel(struct ath10k *ar)
+{
+ struct cfg80211_chan_def def = {};
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_htt_rx_h_any_chan_iter,
+ &def);
+
+ return def.chan;
+}
+
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd,
+ u32 vdev_id)
{
struct ieee80211_channel *ch;
@@ -782,6 +811,12 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
+ if (!ch)
+ ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
+ if (!ch)
+ ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
+ if (!ch)
+ ch = ath10k_htt_rx_h_any_channel(ar);
spin_unlock_bh(&ar->data_lock);
if (!ch)
@@ -819,7 +854,8 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ u32 vdev_id)
{
struct sk_buff *first;
struct htt_rx_desc *rxd;
@@ -851,7 +887,7 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_signal(ar, status, rxd);
- ath10k_htt_rx_h_channel(ar, status);
+ ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
ath10k_htt_rx_h_rates(ar, status, rxd);
}
@@ -929,10 +965,16 @@ static void ath10k_process_rx(struct ath10k *ar,
ieee80211_rx(ar->hw, skb);
}
-static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
+ struct ieee80211_hdr *hdr)
{
- /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
- return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+ int len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+ ar->fw_features))
+ len = round_up(len, 4);
+
+ return len;
}
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
@@ -1031,7 +1073,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
/* pull decapped header and copy SA & DA */
hdr = (struct ieee80211_hdr *)msdu->data;
- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(msdu, hdr_len);
@@ -1522,7 +1564,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
break;
}
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
@@ -1569,7 +1611,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
return;
}
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
@@ -1598,6 +1640,7 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
tx_done.no_ack = true;
break;
case HTT_DATA_TX_STATUS_OK:
+ tx_done.success = true;
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
@@ -1796,7 +1839,7 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
- ath10k_htt_rx_h_channel(ar, status);
+ ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_process_rx(ar, status, msdu);
}
}
@@ -1869,7 +1912,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
- ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
@@ -1892,6 +1935,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
+ enum htt_t2h_msg_type type;
/* confirm alignment */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
@@ -1899,7 +1943,16 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
- switch (resp->hdr.msg_type) {
+
+ if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
+ resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
+
+ switch (type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
htt->target_version_major = resp->ver_resp.major;
htt->target_version_minor = resp->ver_resp.minor;
@@ -1937,6 +1990,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
switch (status) {
case HTT_MGMT_TX_STATUS_OK:
+ tx_done.success = true;
break;
case HTT_MGMT_TX_STATUS_RETRY:
tx_done.no_ack = true;
@@ -1976,7 +2030,6 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_TEST:
- /* FIX THIS */
break;
case HTT_T2H_MSG_TYPE_STATS_CONF:
trace_ath10k_htt_stats(ar, skb->data, skb->len);
@@ -2018,11 +2071,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
return;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
- /* FIXME: This WMI-TLV event is overlapping with 10.2
- * CHAN_CHANGE - both being 0xF. Neither is being used in
- * practice so no immediate action is necessary. Nevertheless
- * HTT may need an abstraction layer like WMI has one day.
- */
+ break;
+ case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
break;
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index cbd2bc9e6202..a60ef7d1d5fc 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -26,7 +26,7 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
- ieee80211_wake_queues(htt->ar->hw);
+ ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
@@ -49,7 +49,7 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx)
- ieee80211_stop_queues(htt->ar->hw);
+ ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
exit:
spin_unlock_bh(&htt->tx_lock);
@@ -420,9 +420,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
int res;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
- dma_addr_t paddr;
- u32 frags_paddr;
- bool use_frags;
+ dma_addr_t paddr = 0;
+ u32 frags_paddr = 0;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
@@ -440,12 +439,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
- /* Since HTT 3.0 there is no separate mgmt tx command. However in case
- * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
- * fragment list host driver specifies directly frame pointer. */
- use_frags = htt->target_version_major < 3 ||
- !ieee80211_is_mgmt(hdr->frame_control);
-
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr);
if (!skb_cb->htt.txbuf) {
@@ -466,7 +459,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if (res)
goto err_free_txbuf;
- if (likely(use_frags)) {
+ switch (skb_cb->txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ /* fall through */
+ case ATH10K_HW_TXRX_ETHERNET:
frags = skb_cb->htt.txbuf->frags;
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
@@ -474,15 +472,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
frags[1].paddr = 0;
frags[1].len = 0;
- flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->htt.txbuf_paddr;
- } else {
+ break;
+ case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
frags_paddr = skb_cb->paddr;
+ break;
}
/* Normally all commands go through HTC which manages tx credits for
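The new switch in ath10k_htt_tx() leans on an intentional fallthrough: RAW and NATIVE_WIFI first set MAC_HDR_PRESENT and then share the ETHERNET arm that builds the fragment list. An equivalent branch-based spelling, offered only as a readability aid:

/* Equivalent expansion of the txmode switch above (sketch only). */
if (skb_cb->txmode == ATH10K_HW_TXRX_RAW ||
    skb_cb->txmode == ATH10K_HW_TXRX_NATIVE_WIFI ||
    skb_cb->txmode == ATH10K_HW_TXRX_MGMT)
	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

if (skb_cb->txmode == ATH10K_HW_TXRX_MGMT) {
	/* mgmt: firmware takes the frame pointer directly */
	frags_paddr = skb_cb->paddr;
} else {
	/* data: two-entry fragment list held in the tx descriptor */
	frags_paddr = skb_cb->htt.txbuf_paddr;
}
flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

Only ETHERNET frames go out without MAC_HDR_PRESENT, since their 802.11 header is synthesized by the target.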
@@ -508,11 +508,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
- if (!ieee80211_has_protected(hdr->frame_control))
+ if (!skb_cb->is_protected)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
- flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
-
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
if (msdu->ip_summed == CHECKSUM_PARTIAL) {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 460771fcfe9e..89e09cbeac19 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -78,6 +78,9 @@ enum qca6174_chip_id_rev {
/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
#define ATH10K_FW_API4_FILE "firmware-4.bin"
+/* HTT id conflict fix for management frames over HTT */
+#define ATH10K_FW_API5_FILE "firmware-5.bin"
+
#define ATH10K_FW_UTF_FILE "utf.bin"
/* includes also the null byte */
@@ -104,6 +107,11 @@ enum ath10k_fw_ie_type {
* FW API 4 and above.
*/
ATH10K_FW_IE_WMI_OP_VERSION = 5,
+
+ /* HTT "operations" interface version, 32 bit value. Supported from
+ * FW API 5 and above.
+ */
+ ATH10K_FW_IE_HTT_OP_VERSION = 6,
};
enum ath10k_fw_wmi_op_version {
@@ -119,6 +127,20 @@ enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_MAX,
};
+enum ath10k_fw_htt_op_version {
+ ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
+
+ ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
+
+ /* also used in 10.2 and 10.2.4 branches */
+ ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
+
+ ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+
+ /* keep last */
+ ATH10K_FW_HTT_OP_VERSION_MAX,
+};
+
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
@@ -180,6 +202,27 @@ struct ath10k_pktlog_hdr {
u8 payload[0];
} __packed;
+enum ath10k_hw_rate_ofdm {
+ ATH10K_HW_RATE_OFDM_48M = 0,
+ ATH10K_HW_RATE_OFDM_24M,
+ ATH10K_HW_RATE_OFDM_12M,
+ ATH10K_HW_RATE_OFDM_6M,
+ ATH10K_HW_RATE_OFDM_54M,
+ ATH10K_HW_RATE_OFDM_36M,
+ ATH10K_HW_RATE_OFDM_18M,
+ ATH10K_HW_RATE_OFDM_9M,
+};
+
+enum ath10k_hw_rate_cck {
+ ATH10K_HW_RATE_CCK_LP_11M = 0,
+ ATH10K_HW_RATE_CCK_LP_5_5M,
+ ATH10K_HW_RATE_CCK_LP_2M,
+ ATH10K_HW_RATE_CCK_LP_1M,
+ ATH10K_HW_RATE_CCK_SP_11M,
+ ATH10K_HW_RATE_CCK_SP_5_5M,
+ ATH10K_HW_RATE_CCK_SP_2M,
+};
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -223,7 +266,7 @@ struct ath10k_pktlog_hdr {
#define TARGET_10X_NUM_WDS_ENTRIES 32
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
-#define TARGET_10X_AST_SKID_LIMIT 16
+#define TARGET_10X_AST_SKID_LIMIT 128
#define TARGET_10X_NUM_STATIONS 128
#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
(TARGET_10X_NUM_VDEVS))
@@ -256,13 +299,13 @@ struct ath10k_pktlog_hdr {
#define TARGET_10_2_DMA_BURST_SIZE 1
/* Target specific defines for WMI-TLV firmware */
-#define TARGET_TLV_NUM_VDEVS 3
+#define TARGET_TLV_NUM_VDEVS 4
#define TARGET_TLV_NUM_STATIONS 32
-#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \
- (TARGET_TLV_NUM_VDEVS) + \
- 2)
+#define TARGET_TLV_NUM_PEERS 35
+#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
+#define TARGET_TLV_NUM_WOW_PATTERNS 22
/* Number of Copy Engines supported */
#define CE_COUNT 8
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 973485bd4121..0ed422ae46a4 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -28,7 +28,131 @@
#include "txrx.h"
#include "testmode.h"
#include "wmi.h"
+#include "wmi-tlv.h"
#include "wmi-ops.h"
+#include "wow.h"
+
+/*********/
+/* Rates */
+/*********/
+
+static struct ieee80211_rate ath10k_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
+
+#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+static bool ath10k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+static u8 ath10k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u32
+ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
/**********/
/* Crypto */
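ath10k_mac_bitrate_to_rate() above encodes mac80211 bitrates (units of 100 kbps) into the rate codes the firmware expects: the value rounds to 500 kbps units and BIT(7) marks CCK. Worked examples (the encoding convention is inferred from the helper itself, not stated elsewhere in the patch):

/* Worked examples for ath10k_mac_bitrate_to_rate(); bitrate in
 * units of 100 kbps, result in units of 500 kbps, BIT(7) = CCK.
 */
ath10k_mac_bitrate_to_rate(10);   /* 1 Mbps   CCK:  2 | 0x80 = 0x82 */
ath10k_mac_bitrate_to_rate(55);   /* 5.5 Mbps CCK: 11 | 0x80 = 0x8b */
ath10k_mac_bitrate_to_rate(60);   /* 6 Mbps  OFDM: 12        = 0x0c */
ath10k_mac_bitrate_to_rate(540);  /* 54 Mbps OFDM: 108       = 0x6c */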
@@ -37,7 +161,7 @@
static int ath10k_send_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
- const u8 *macaddr, bool def_idx)
+ const u8 *macaddr, u32 flags)
{
struct ath10k *ar = arvif->ar;
struct wmi_vdev_install_key_arg arg = {
@@ -45,16 +169,12 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
.key_idx = key->keyidx,
.key_len = key->keylen,
.key_data = key->key,
+ .key_flags = flags,
.macaddr = macaddr,
};
lockdep_assert_held(&arvif->ar->conf_mutex);
- if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- arg.key_flags = WMI_KEY_PAIRWISE;
- else
- arg.key_flags = WMI_KEY_GROUP;
-
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
arg.key_cipher = WMI_CIPHER_AES_CCM;
@@ -68,17 +188,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
arg.key_cipher = WMI_CIPHER_WEP;
- /* AP/IBSS mode requires self-key to be groupwise
- * Otherwise pairwise key must be set */
- if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
- arg.key_flags = WMI_KEY_PAIRWISE;
-
- if (def_idx)
- arg.key_flags |= WMI_KEY_TX_USAGE;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- /* this one needs to be done in software */
- return 1;
+ WARN_ON(1);
+ return -EINVAL;
default:
ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
return -EOPNOTSUPP;
@@ -95,21 +208,22 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
static int ath10k_install_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
- const u8 *macaddr, bool def_idx)
+ const u8 *macaddr, u32 flags)
{
struct ath10k *ar = arvif->ar;
int ret;
+ unsigned long time_left;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->install_key_done);
- ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
+ ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
if (ret)
return ret;
- ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
- if (ret == 0)
+ time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
@@ -122,7 +236,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
struct ath10k_peer *peer;
int ret;
int i;
- bool def_idx;
+ u32 flags;
lockdep_assert_held(&ar->conf_mutex);
@@ -136,14 +250,20 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
if (arvif->wep_keys[i] == NULL)
continue;
- /* set TX_USAGE flag for default key id */
- if (arvif->def_wep_key_idx == i)
- def_idx = true;
- else
- def_idx = false;
+
+ flags = 0;
+ flags |= WMI_KEY_PAIRWISE;
+
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+ addr, flags);
+ if (ret)
+ return ret;
+
+ flags = 0;
+ flags |= WMI_KEY_GROUP;
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
- addr, def_idx);
+ addr, flags);
if (ret)
return ret;
@@ -152,6 +272,27 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
spin_unlock_bh(&ar->data_lock);
}
+ /* In some cases (notably with static WEP IBSS with multiple keys)
+ * multicast Tx becomes broken. Both pairwise and groupwise keys are
+ * installed already. Using WMI_KEY_TX_USAGE in different combinations
+ * didn't seem to help. Using the def_keyid vdev parameter seems to be
+ * effective so use that.
+ *
+ * FIXME: Revisit. Perhaps this can be done in a less hacky way.
+ */
+ if (arvif->def_wep_key_idx == -1)
+ return 0;
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ arvif->def_wep_key_idx);
+ if (ret) {
+ ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
return 0;
}
@@ -163,6 +304,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
int first_errno = 0;
int ret;
int i;
+ u32 flags = 0;
lockdep_assert_held(&ar->conf_mutex);
@@ -179,7 +321,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
/* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, peer->keys[i],
- DISABLE_KEY, addr, false);
+ DISABLE_KEY, addr, flags);
if (ret && first_errno == 0)
first_errno = ret;
@@ -229,6 +371,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
int first_errno = 0;
int ret;
int i;
+ u32 flags = 0;
lockdep_assert_held(&ar->conf_mutex);
@@ -254,7 +397,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
if (i == ARRAY_SIZE(peer->keys))
break;
/* key flags are not required to delete the key */
- ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
+ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
if (ret && first_errno == 0)
first_errno = ret;
@@ -266,6 +409,39 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
return first_errno;
}
+static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(peer, &ar->peers, list) {
+ if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+ continue;
+
+ if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+ continue;
+
+ if (peer->keys[key->keyidx] == key)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
+ arvif->vdev_id, key->keyidx);
+
+ ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
+ arvif->vdev_id, peer->addr, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/*********************/
/* General utilities */
/*********************/
@@ -364,7 +540,56 @@ static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
}
}
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ int *num = data;
+
+ (*num)++;
+}
+
+static int ath10k_mac_num_chanctxs(struct ath10k *ar)
+{
+ int num = 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_num_chanctxs_iter,
+ &num);
+
+ return num;
+}
+
+static void
+ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+ enum wmi_peer_type peer_type)
{
int ret;
@@ -373,7 +598,7 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
if (ar->num_peers >= ar->max_num_peers)
return -ENOBUFS;
- ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+ ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
if (ret) {
ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
@@ -517,6 +742,38 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
ar->num_stations = 0;
}
+static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
+ struct ieee80211_sta *sta,
+ enum wmi_tdls_peer_state state)
+{
+ int ret;
+ struct wmi_tdls_peer_update_cmd_arg arg = {};
+ struct wmi_tdls_peer_capab_arg cap = {};
+ struct wmi_channel_arg chan_arg = {};
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arg.vdev_id = vdev_id;
+ arg.peer_state = state;
+ ether_addr_copy(arg.addr, sta->addr);
+
+ cap.peer_max_sp = sta->max_sp;
+ cap.peer_uapsd_queues = sta->uapsd_queues;
+
+ if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
+ !sta->tdls_initiator)
+ cap.is_peer_responder = 1;
+
+ ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
+ arg.addr, vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/************************/
/* Interface management */
/************************/
@@ -561,16 +818,16 @@ static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
- int ret;
+ unsigned long time_left;
lockdep_assert_held(&ar->conf_mutex);
if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
return -ESHUTDOWN;
- ret = wait_for_completion_timeout(&ar->vdev_setup_done,
- ATH10K_VDEV_SETUP_TIMEOUT_HZ);
- if (ret == 0)
+ time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
@@ -578,13 +835,21 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
- struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct cfg80211_chan_def *chandef = NULL;
- struct ieee80211_channel *channel = chandef->chan;
+ struct ieee80211_channel *channel;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &chandef);
+ if (WARN_ON_ONCE(!chandef))
+ return -ENOENT;
+
+ channel = chandef->chan;
+
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -766,27 +1031,78 @@ static int ath10k_monitor_stop(struct ath10k *ar)
return 0;
}
+static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ /* At least one chanctx is required to derive a channel to start
+ * monitor vdev on.
+ */
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+ if (num_ctx == 0)
+ return false;
+
+ /* If there's already an existing special monitor interface then don't
+ * bother creating another monitor vdev.
+ */
+ if (ar->monitor_arvif)
+ return false;
+
+ return ar->monitor ||
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+}
+
+static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+
+ /* FIXME: Current interface combinations and cfg80211/mac80211 code
+ * shouldn't allow this but make sure to prevent handling the following
+ * case anyway since multi-channel DFS hasn't been tested at all.
+ */
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
+ return false;
+
+ return true;
+}
+
static int ath10k_monitor_recalc(struct ath10k *ar)
{
- bool should_start;
+ bool needed;
+ bool allowed;
+ int ret;
lockdep_assert_held(&ar->conf_mutex);
- should_start = ar->monitor ||
- ar->filter_flags & FIF_PROMISC_IN_BSS ||
- test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ needed = ath10k_mac_monitor_vdev_is_needed(ar);
+ allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac monitor recalc started? %d should? %d\n",
- ar->monitor_started, should_start);
+ "mac monitor recalc started? %d needed? %d allowed? %d\n",
+ ar->monitor_started, needed, allowed);
- if (should_start == ar->monitor_started)
+ if (WARN_ON(needed && !allowed)) {
+ if (ar->monitor_started) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
+
+ ret = ath10k_monitor_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret);
+ /* not serious */
+ }
+
+ return -EPERM;
+ }
+
+ if (needed == ar->monitor_started)
return 0;
- if (should_start)
+ if (needed)
return ath10k_monitor_start(ar);
-
- return ath10k_monitor_stop(ar);
+ else
+ return ath10k_monitor_stop(ar);
}
static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
@@ -798,12 +1114,14 @@ static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
vdev_param = ar->wmi.vdev_param->enable_rtscts;
- if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
- rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+ rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
if (arvif->num_legacy_stations > 0)
rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
WMI_RTSCTS_PROFILE);
+ else
+ rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_PROFILE);
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
rts_cts);
@@ -846,6 +1164,27 @@ static int ath10k_stop_cac(struct ath10k *ar)
return 0;
}
+static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ bool *ret = data;
+
+ if (!*ret && conf->radar_enabled)
+ *ret = true;
+}
+
+static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
+{
+ bool has_radar = false;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_has_radar_iter,
+ &has_radar);
+
+ return has_radar;
+}
+
static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
int ret;
@@ -854,7 +1193,7 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
ath10k_stop_cac(ar);
- if (!ar->radar_enabled)
+ if (!ath10k_mac_has_radar_enabled(ar))
return;
if (ar->num_started_vdevs > 0)
@@ -872,10 +1211,44 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
}
}
-static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ if (ar->num_started_vdevs != 0) {
+ ar->num_started_vdevs--;
+ ath10k_recalc_radar_detection(ar);
+ }
+
+ return ret;
+}
+
+static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
{
struct ath10k *ar = arvif->ar;
- struct cfg80211_chan_def *chandef = &ar->chandef;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
@@ -939,47 +1312,16 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
return ret;
}
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
-{
- return ath10k_vdev_start_restart(arvif, false);
-}
-
-static int ath10k_vdev_restart(struct ath10k_vif *arvif)
+static int ath10k_vdev_start(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
{
- return ath10k_vdev_start_restart(arvif, true);
+ return ath10k_vdev_start_restart(arvif, def, false);
}
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+static int ath10k_vdev_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
{
- struct ath10k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- ret = ath10k_vdev_setup_sync(ar);
- if (ret) {
- ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- WARN_ON(ar->num_started_vdevs == 0);
-
- if (ar->num_started_vdevs != 0) {
- ar->num_started_vdevs--;
- ath10k_recalc_radar_detection(ar);
- }
-
- return ret;
+ return ath10k_vdev_start_restart(arvif, def, true);
}
static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
@@ -1056,6 +1398,10 @@ static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
return 0;
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return 0;
+
bcn = ieee80211_beacon_get_template(hw, vif, &offs);
if (!bcn) {
ath10k_warn(ar, "failed to get beacon template from mac80211\n");
@@ -1101,6 +1447,9 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
return 0;
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
prb = ieee80211_proberesp_get(hw, vif);
if (!prb) {
ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
@@ -1119,6 +1468,80 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
return 0;
}
+static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def def;
+ int ret;
+
+ /* When originally vdev is started during assign_vif_chanctx() some
+ * information is missing, notably SSID. Firmware revisions with beacon
+ * offloading require the SSID to be provided during vdev (re)start to
+ * handle hidden SSID properly.
+ *
+ * Vdev restart must be done after vdev has been both started and
+ * upped. Otherwise some firmware revisions (at least 10.2) fail to
+ * deliver vdev restart response event causing timeouts during vdev
+ * syncing in ath10k.
+ *
+ * Note: The vdev down/up and template reinstallation could be skipped
+ * since only wmi-tlv firmware are known to have beacon offload and
+ * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
+ * response delivery. It's probably more robust to keep it as is.
+ */
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EINVAL;
+
+ if (WARN_ON(!arvif->is_up))
+ return -EINVAL;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return -EINVAL;
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
+ * firmware will crash upon vdev up.
+ */
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update presp template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_restart(arvif, &def);
+ if (ret) {
+ ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
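The recovery path in ath10k_mac_vif_fix_hidden_ssid() is order-sensitive; condensed:

/* Order of operations in ath10k_mac_vif_fix_hidden_ssid():
 *   1. ath10k_wmi_vdev_down()       - firmware drops bcn/prb templates
 *   2. ath10k_mac_setup_bcn_tmpl()  - reinstall beacon template
 *   3. ath10k_mac_setup_prb_tmpl()  - reinstall probe resp template
 *   4. ath10k_vdev_restart(&def)    - restart with the SSID now known
 *   5. ath10k_wmi_vdev_up()         - resume beaconing
 * Skipping steps 2-3 crashes the firmware on vdev up, per the
 * comment in the function body above.
 */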
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@@ -1128,9 +1551,11 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
lockdep_assert_held(&arvif->ar->conf_mutex);
if (!info->enable_beacon) {
- ath10k_vdev_stop(arvif);
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
- arvif->is_started = false;
arvif->is_up = false;
spin_lock_bh(&arvif->ar->data_lock);
@@ -1142,10 +1567,6 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->tx_seq_no = 0x1000;
- ret = ath10k_vdev_start(arvif);
- if (ret)
- return;
-
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
@@ -1154,13 +1575,18 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
if (ret) {
ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
- ath10k_vdev_stop(arvif);
return;
}
- arvif->is_started = true;
arvif->is_up = true;
+ ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
@@ -1175,11 +1601,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
lockdep_assert_held(&arvif->ar->conf_mutex);
if (!info->ibss_joined) {
- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
- if (ret)
- ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
- self_peer, arvif->vdev_id, ret);
-
if (is_zero_ether_addr(arvif->bssid))
return;
@@ -1188,13 +1609,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
return;
}
- ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
- if (ret) {
- ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
- self_peer, arvif->vdev_id, ret);
- return;
- }
-
vdev_param = arvif->ar->wmi.vdev_param->atim_window;
ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
@@ -1294,7 +1708,14 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
enable_ps = false;
}
- if (enable_ps) {
+ if (!arvif->is_started) {
+ /* mac80211 can update vif powersave state while disconnected.
+ * Firmware doesn't behave nicely and consumes more power than
+ * necessary if PS is disabled on a non-started vdev. Hence
+ * force-enable PS for non-running vdevs.
+ */
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ } else if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
@@ -1361,6 +1782,123 @@ static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
return 0;
}
+static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
+ return;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return;
+
+ if (!vif->csa_active)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ if (!ieee80211_csa_is_complete(vif)) {
+ ieee80211_csa_update_counter(vif);
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+ } else {
+ ieee80211_csa_finish(vif);
+ }
+}
+
+static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ ap_csa_work);
+ struct ath10k *ar = arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_mac_vif_ap_csa_count_down(arvif);
+ mutex_unlock(&ar->conf_mutex);
+}
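+
+/* Editor's sketch (assumption, not part of the patch): the CSA work above
+ * is expected to be kicked from the beacon tx completion/SWBA event path
+ * while a channel switch is pending. A hypothetical trigger:
+ */
+static void ath10k_example_csa_kick(struct ath10k *ar,
+ struct ath10k_vif *arvif)
+{
+ if (arvif->vif->csa_active && arvif->is_up)
+ ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+}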
+
+static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+ /* Firmware doesn't report beacon loss events repeatedly. If the AP
+ * probe (done by mac80211) succeeds but beacons do not resume then it
+ * doesn't make sense to continue operation. Queue the connection loss
+ * work, which is cancelled when a beacon is received.
+ */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH10K_CONNECTION_LOSS_HZ);
+}
+
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
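+
+/* Editor's note: ATH10K_CONNECTION_LOSS_HZ bounds how long the driver
+ * waits for beacons to resume after a beacon miss before the work above
+ * declares the connection lost. It is defined outside this diff; the
+ * value below is an assumption shown for illustration only.
+ *
+ * #define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
+ */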
+
/**********************/
/* Station management */
/**********************/
@@ -1388,12 +1926,18 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ u32 aid;
lockdep_assert_held(&ar->conf_mutex);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->bss_conf.aid;
+ else
+ aid = sta->aid;
+
ether_addr_copy(arg->addr, sta->addr);
arg->vdev_id = arvif->vdev_id;
- arg->peer_aid = sta->aid;
+ arg->peer_aid = aid;
arg->peer_flags |= WMI_PEER_AUTH;
arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
arg->peer_num_spatial_streams = 1;
@@ -1405,15 +1949,18 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
lockdep_assert_held(&ar->conf_mutex);
- bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
- info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY,
- IEEE80211_PRIVACY_ANY);
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (bss) {
const struct cfg80211_bss_ies *ies;
@@ -1443,19 +1990,29 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
}
static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
+ enum ieee80211_band band;
u32 ratemask;
+ u8 rate;
int i;
lockdep_assert_held(&ar->conf_mutex);
- sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
- ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
rateset->num_rates = 0;
@@ -1464,24 +2021,66 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
if (!(ratemask & 1))
continue;
- rateset->rates[rateset->num_rates] = rates->hw_value;
+ rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
rateset->num_rates++;
}
}
+static bool
+ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int i, n;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ int i, n, max_nss;
u32 stbc;
lockdep_assert_held(&ar->conf_mutex);
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
if (!ht_cap->ht_supported)
return;
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
+ ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
arg->peer_flags |= WMI_PEER_HT;
arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ht_cap->ampdu_factor)) - 1;
@@ -1500,11 +2099,13 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
}
- if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
- arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
- if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
- arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ }
if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
@@ -1524,9 +2125,12 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
else if (ht_cap->mcs.rx_mask[1])
arg->peer_rate_caps |= WMI_RC_DS_FLAG;
- for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
- if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
arg->peer_ht_rates.rates[n++] = i;
+ }
/*
* This is a workaround for HT-enabled STAs which break the spec
@@ -1543,7 +2147,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_num_spatial_streams = sta->rx_nss;
+ arg->peer_num_spatial_streams = max_nss;
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -1619,19 +2223,84 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
return 0;
}
+static u16
+ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0: /* fall through */
+ case 1: /* fall through */
+ case 2: /* fall through */
+ case 3: /* fall through */
+ case 4: /* fall through */
+ case 5: /* fall through */
+ case 6: /* fall through */
+ default:
+ /* see ath10k_mac_can_set_bitrate_mask() */
+ WARN_ON(1);
+ /* fall through */
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
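+
+/* Editor's worked example for the packing above (values illustrative):
+ * the VHT tx MCS map stores two bits per spatial stream, where
+ * 0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9 and 3 = not supported. Starting
+ * from a 2x2 map of 0xfffa (MCS 0-9 on both streams), capping the second
+ * stream at MCS 0-7 clears its two bits and writes
+ * IEEE80211_VHT_MCS_SUPPORT_0_7 (0):
+ *
+ * tx_mcs_set &= ~(0x3 << (1 * 2)); // 0xfffa -> 0xfff2
+ * tx_mcs_set |= IEEE80211_VHT_MCS_SUPPORT_0_7 << (1 * 2); // 0xfff2
+ */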
+
static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u16 *vht_mcs_mask;
u8 ampdu_factor;
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
if (!vht_cap->vht_supported)
return;
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
arg->peer_flags |= WMI_PEER_VHT;
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ if (def.chan->band == IEEE80211_BAND_2GHZ)
arg->peer_flags |= WMI_PEER_VHT_2G;
arg->peer_vht_caps = vht_cap->cap;
@@ -1657,8 +2326,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->peer_vht_rates.tx_max_rate =
__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
- arg->peer_vht_rates.tx_mcs_set =
- __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+ arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
@@ -1697,10 +2366,10 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
}
-static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
+static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- /* First 4 rates in ath10k_rates are CCK (11b) rates. */
- return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
+ return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}
static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
@@ -1708,21 +2377,35 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
- switch (ar->hw->conf.chandef.chan->band) {
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ switch (band) {
case IEEE80211_BAND_2GHZ:
- if (sta->vht_cap.vht_supported) {
+ if (sta->vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
- } else if (ath10k_mac_sta_has_11g_rates(sta)) {
+ } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
phymode = MODE_11G;
} else {
phymode = MODE_11B;
@@ -1733,15 +2416,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
/*
* Check VHT first.
*/
- if (sta->vht_cap.vht_supported) {
+ if (sta->vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AC_VHT80;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ } else if (sta->ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -1772,9 +2457,9 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
ath10k_peer_assoc_h_crypto(ar, vif, arg);
- ath10k_peer_assoc_h_rates(ar, sta, arg);
- ath10k_peer_assoc_h_ht(ar, sta, arg);
- ath10k_peer_assoc_h_vht(ar, sta, arg);
+ ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
@@ -1993,6 +2678,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
}
arvif->is_up = false;
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
}
static int ath10k_station_assoc(struct ath10k *ar,
@@ -2013,7 +2700,6 @@ static int ath10k_station_assoc(struct ath10k *ar,
return ret;
}
- peer_arg.peer_reassoc = reassoc;
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -2274,6 +2960,149 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
/* TX handlers */
/***************/
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
+{
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused |= BIT(reason);
+ ieee80211_stop_queues(ar->hw);
+}
+
+static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
+{
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_mac_tx_unlock_iter,
+ ar);
+}
+
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused |= BIT(reason);
+ ieee80211_stop_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
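+
+/* Editor's sketch of the bookkeeping above (illustrative, not part of the
+ * patch): each pause reason owns one bit, and a vif may transmit only
+ * when both the device-wide and the per-vif masks are empty.
+ */
+static bool ath10k_example_vif_may_tx(struct ath10k *ar,
+ struct ath10k_vif *arvif)
+{
+ return ar->tx_paused == 0 && arvif->tx_paused == 0;
+}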
+
+static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ switch (pause_id) {
+ case WMI_TLV_TX_PAUSE_ID_MCC:
+ case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+ case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PS:
+ case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+ switch (action) {
+ case WMI_TLV_TX_PAUSE_ACTION_STOP:
+ ath10k_mac_vif_tx_lock(arvif, pause_id);
+ break;
+ case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+ ath10k_mac_vif_tx_unlock(arvif, pause_id);
+ break;
+ default:
+ ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+ action, arvif->vdev_id);
+ break;
+ }
+ break;
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+ case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+ case WMI_TLV_TX_PAUSE_ID_HOST:
+ default:
+ /* FIXME: Some pause_ids aren't vdev specific. Instead they
+ * target peer_id and tid. Implementing these could improve
+ * traffic scheduling fairness across multiple connected
+ * stations in AP/IBSS modes.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unsupported tx pause vdev %i id %d\n",
+ arvif->vdev_id, pause_id);
+ break;
+ }
+}
+
+struct ath10k_mac_tx_pause {
+ u32 vdev_id;
+ enum wmi_tlv_tx_pause_id pause_id;
+ enum wmi_tlv_tx_pause_action action;
+};
+
+static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_mac_tx_pause *arg = data;
+
+ ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
+}
+
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k_mac_tx_pause arg = {
+ .vdev_id = vdev_id,
+ .pause_id = pause_id,
+ .action = action,
+ };
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_mac_handle_tx_pause_iter,
+ &arg);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
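+
+/* Editor's sketch (assumption, not part of the patch): a wmi-tlv tx-pause
+ * event carrying a vdev bitmap would fan out to the handler above roughly
+ * like this; the function name and vdev_map decoding are illustrative.
+ */
+static void ath10k_example_tx_pause_ev(struct ath10k *ar, u32 vdev_map,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ int vdev_id;
+
+ for (vdev_id = 0; vdev_map; vdev_id++, vdev_map >>= 1) {
+ if (!(vdev_map & 1))
+ continue;
+
+ ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+ }
+}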
+
static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
{
if (ieee80211_is_mgmt(hdr->frame_control))
@@ -2300,6 +3129,52 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
return 0;
}
+static enum ath10k_hw_txrx_mode
+ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+ const struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
+ return ATH10K_HW_TXRX_RAW;
+
+ if (ieee80211_is_mgmt(fc))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * NullFunc frames are mostly used to ping whether a client or AP is
+ * still reachable and responsive. This implies tx status reports must
+ * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
+ * can wrongly conclude that the other end disappeared and tear down
+ * the BSS connection, or can fail to ever disconnect from the
+ * BSS/client.
+ *
+ * Firmware with HTT older than 3.0 delivers incorrect tx status for
+ * NullFunc frames to the driver. However there's an HTT Mgmt Tx
+ * command which seems to deliver correct tx reports for NullFunc
+ * frames. The downside of using it is that it ignores client powersave
+ * state, so it can end up disconnecting sleeping clients in AP mode.
+ * It should fix STA mode though, because APs don't sleep.
+ */
+ if (ar->htt.target_version_major < 3 &&
+ (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+ !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
+ * NativeWifi txmode - they select the AP key instead of the peer key.
+ * Key selection seems to work correctly with Ethernet txmode, so use
+ * that instead.
+ */
+ if (ieee80211_is_data_present(fc) && sta && sta->tdls)
+ return ATH10K_HW_TXRX_ETHERNET;
+
+ return ATH10K_HW_TXRX_NATIVE_WIFI;
+}
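+
+/* Editor's summary of the selection above (illustrative):
+ *
+ * no vif, or a monitor vif -> ATH10K_HW_TXRX_RAW
+ * mgmt, or NullFunc on pre-3.0 HTT w/o WMI mgmt tx -> ATH10K_HW_TXRX_MGMT
+ * TDLS data (broken NativeWifi key selection) -> ATH10K_HW_TXRX_ETHERNET
+ * everything else -> ATH10K_HW_TXRX_NATIVE_WIFI
+ */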
+
/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
* Control in the header.
*/
@@ -2317,16 +3192,42 @@ static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
skb->data, (void *)qos_ctl - (void *)skb->data);
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
- /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
- * frames. Powersave is handled by the fw/hw so QoS NyllFunc frames are
- * used only for CQM purposes (e.g. hostapd station keepalive ping) so
- * it is safe to downgrade to NullFunc.
+ /* Some firmware revisions don't handle sending QoS NullFunc well.
+ * These frames are mainly used for CQM purposes so it doesn't really
+ * matter whether a QoS NullFunc or a plain NullFunc is sent.
*/
hdr = (void *)skb->data;
- if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
- }
+
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static void ath10k_tx_h_8023(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct rfc1042_hdr *rfc1042;
+ struct ethhdr *eth;
+ size_t hdrlen;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ __be16 type;
+
+ hdr = (void *)skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ rfc1042 = (void *)skb->data + hdrlen;
+
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ type = rfc1042->snap_type;
+
+ skb_pull(skb, hdrlen + sizeof(*rfc1042));
+ skb_push(skb, sizeof(*eth));
+
+ eth = (void *)skb->data;
+ ether_addr_copy(eth->h_dest, da);
+ ether_addr_copy(eth->h_source, sa);
+ eth->h_proto = type;
}
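+
+/* Editor's layout sketch for the conversion above (illustrative):
+ *
+ * before: [802.11 hdr][RFC1042/SNAP hdr][payload]
+ * after: [dst 6][src 6][ethertype 2][payload]
+ *
+ * DA/SA are taken from the 802.11 header and the SNAP type becomes the
+ * Ethernet protocol field.
+ */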
static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
@@ -2365,45 +3266,51 @@ static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
ar->htt.target_version_minor >= 4);
}
-static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
int ret = 0;
- if (ar->htt.target_version_major >= 3) {
- /* Since HTT 3.0 there is no separate mgmt tx command */
- ret = ath10k_htt_tx(&ar->htt, skb);
- goto exit;
+ spin_lock_bh(&ar->data_lock);
+
+ if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
+ ath10k_warn(ar, "wmi mgmt tx queue is full\n");
+ ret = -ENOSPC;
+ goto unlock;
}
- if (ieee80211_is_mgmt(hdr->frame_control)) {
- if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->fw_features)) {
- if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
- ATH10K_MAX_NUM_MGMT_PENDING) {
- ath10k_warn(ar, "reached WMI management transmit queue limit\n");
- ret = -EBUSY;
- goto exit;
- }
+ __skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
- skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
- } else {
- ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- }
- } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->fw_features) &&
- ieee80211_is_nullfunc(hdr->frame_control)) {
- /* FW does not report tx status properly for NullFunc frames
- * unless they are sent through mgmt tx path. mac80211 sends
- * those frames when it detects link/beacon loss and depends
- * on the tx status to be correct. */
- ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- } else {
- ret = ath10k_htt_tx(&ar->htt, skb);
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
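+
+/* Editor's sketch of the consumer side (assumption, not part of the
+ * patch): frames queued above are expected to be drained by
+ * ar->wmi_mgmt_tx_work and handed to the WMI management tx command,
+ * roughly as below; completion/cleanup details are omitted.
+ */
+static void ath10k_example_wmi_mgmt_drain(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue))) {
+ ret = ath10k_wmi_mgmt_tx(ar, skb);
+ if (ret)
+ ath10k_warn(ar, "wmi mgmt tx failed: %d\n", ret);
+ }
+}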
+
+static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ struct ath10k_htt *htt = &ar->htt;
+ int ret = 0;
+
+ switch (cb->txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ case ATH10K_HW_TXRX_ETHERNET:
+ ret = ath10k_htt_tx(htt, skb);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features))
+ ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ else if (ar->htt.target_version_major >= 3)
+ ret = ath10k_htt_tx(htt, skb);
+ else
+ ret = ath10k_htt_mgmt_tx(htt, skb);
+ break;
}
-exit:
if (ret) {
ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
ret);
@@ -2433,6 +3340,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
const u8 *peer_addr;
int vdev_id;
int ret;
+ unsigned long time_left;
/* FW requirement: We must create a peer before FW will send out
* an offchannel frame. Otherwise the frame will be stuck and
@@ -2465,7 +3373,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
peer_addr, vdev_id);
if (!peer) {
- ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+ ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+ WMI_PEER_TYPE_DEFAULT);
if (ret)
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
@@ -2476,11 +3385,11 @@ void ath10k_offchan_tx_work(struct work_struct *work)
ar->offchan_tx_skb = skb;
spin_unlock_bh(&ar->data_lock);
- ath10k_tx_htt(ar, skb);
+ ath10k_mac_tx(ar, skb);
- ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
- 3 * HZ);
- if (ret == 0)
+ time_left =
+ wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
+ if (time_left == 0)
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
skb);
@@ -2700,21 +3609,38 @@ static void ath10k_tx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 fc = hdr->frame_control;
/* We should disable CCK RATE due to P2P */
if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+ ATH10K_SKB_CB(skb)->htt.freq = 0;
ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
+ ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
+ ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
- /* it makes no sense to process injected frames like that */
- if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
+ switch (ATH10K_SKB_CB(skb)->txmode) {
+ case ATH10K_HW_TXRX_MGMT:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
ath10k_tx_h_nwifi(hw, skb);
ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
ath10k_tx_h_seq_no(vif, skb);
+ break;
+ case ATH10K_HW_TXRX_ETHERNET:
+ ath10k_tx_h_8023(skb);
+ break;
+ case ATH10K_HW_TXRX_RAW:
+ /* FIXME: Packet injection isn't implemented. It should be
+ * doable with firmware 10.2 on qca988x.
+ */
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return;
}
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@@ -2736,7 +3662,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
}
}
- ath10k_tx_htt(ar, skb);
+ ath10k_mac_tx(ar, skb);
}
/* Must not be called with conf_mutex held as workers can use that also. */
@@ -2761,11 +3687,13 @@ void ath10k_halt(struct ath10k *ar)
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
ar->filter_flags = 0;
ar->monitor = false;
+ ar->monitor_arvif = NULL;
if (ar->monitor_started)
ath10k_monitor_stop(ar);
ar->monitor_started = false;
+ ar->tx_paused = 0;
ath10k_scan_finish(ar);
ath10k_peer_cleanup_all(ar);
@@ -2859,6 +3787,7 @@ static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ u32 burst_enable;
int ret = 0;
/*
@@ -2913,6 +3842,24 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_core_stop;
}
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_adaptive_qcs(ar, true);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
+ burst_enable = ar->wmi.pdev_param->burst_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable burst: %d\n", ret);
+ goto err_core_stop;
+ }
+ }
+
if (ar->cfg_tx_chainmask)
__ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
ar->cfg_rx_chainmask);
@@ -2934,10 +3881,21 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_core_stop;
}
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->ani_enable, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable ani by default: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+
+ ar->ani_enabled = true;
+
ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
ath10k_spectral_start(ar);
+ ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -2991,42 +3949,15 @@ static int ath10k_config_ps(struct ath10k *ar)
return ret;
}
-static const char *chandef_get_width(enum nl80211_chan_width width)
-{
- switch (width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- return "20 (noht)";
- case NL80211_CHAN_WIDTH_20:
- return "20";
- case NL80211_CHAN_WIDTH_40:
- return "40";
- case NL80211_CHAN_WIDTH_80:
- return "80";
- case NL80211_CHAN_WIDTH_80P80:
- return "80+80";
- case NL80211_CHAN_WIDTH_160:
- return "160";
- case NL80211_CHAN_WIDTH_5:
- return "5";
- case NL80211_CHAN_WIDTH_10:
- return "10";
- }
- return "?";
-}
-
-static void ath10k_config_chan(struct ath10k *ar)
+static void ath10k_mac_chan_reconfigure(struct ath10k *ar)
{
struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
- ar->chandef.chan->center_freq,
- ar->chandef.center_freq1,
- ar->chandef.center_freq2,
- chandef_get_width(ar->chandef.width));
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac chan reconfigure\n");
/* First stop monitor interface. Some FW versions crash if there's a
* lone monitor interface. */
@@ -3060,7 +3991,20 @@ static void ath10k_config_chan(struct ath10k *ar)
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
continue;
- ret = ath10k_vdev_restart(arvif);
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ continue;
+
+ ret = ath10k_vdev_restart(arvif, &def);
if (ret) {
ath10k_warn(ar, "failed to restart vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3147,26 +4091,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ar->conf_mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac config channel %dMHz flags 0x%x radar %d\n",
- conf->chandef.chan->center_freq,
- conf->chandef.chan->flags,
- conf->radar_enabled);
-
- spin_lock_bh(&ar->data_lock);
- ar->rx_channel = conf->chandef.chan;
- spin_unlock_bh(&ar->data_lock);
-
- ar->radar_enabled = conf->radar_enabled;
- ath10k_recalc_radar_detection(ar);
-
- if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
- ar->chandef = conf->chandef;
- ath10k_config_chan(ar);
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_PS)
ath10k_config_ps(ar);
@@ -3208,6 +4132,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u32 value;
int bit;
+ int i;
u32 vdev_param;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
@@ -3220,6 +4145,17 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->vif = vif;
INIT_LIST_HEAD(&arvif->list);
+ INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath10k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
if (ar->free_vdev_map == 0) {
ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
@@ -3262,6 +4198,15 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
+ /* Using the vdev_id as the queue number makes it very easy to do
+ * per-vif tx queue locking. This shouldn't wrap given the supported
+ * interface combinations, but do a modulo for correctness' sake and to
+ * prevent the offchannel tx queue from being used for regular vif tx.
+ */
+ vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
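+
+ /* Editor's example (illustrative, assuming IEEE80211_MAX_QUEUES is
+ * 16): vdev ids 0..14 map 1:1 onto hw queues 0..14 and queue 15 stays
+ * reserved for offchannel tx; the modulo only matters if a vdev id
+ * ever exceeded that range.
+ */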
+
/* Some firmware revisions don't wait for beacon tx completion before
* sending another SWBA event. This could lead to hardware using old
* (freed) beacon data in some cases, e.g. tx credit starvation
@@ -3343,14 +4288,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
}
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
+ WMI_PEER_TYPE_DEFAULT);
if (ret) {
- ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
+ ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
+ }
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_mac_set_kickout(arvif);
if (ret) {
ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
@@ -3406,11 +4355,21 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err_peer_delete;
}
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = arvif;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ goto err_peer_delete;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
err_peer_delete:
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
err_vdev_delete:
@@ -3430,6 +4389,14 @@ err:
return ret;
}
+static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
+{
+ int i;
+
+ for (i = 0; i < BITS_PER_LONG; i++)
+ ath10k_mac_vif_tx_unlock(arvif, i);
+}
+
static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -3437,6 +4404,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret;
+ cancel_work_sync(&arvif->ap_csa_work);
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -3451,11 +4421,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ar->free_vdev_map |= 1LL << arvif->vdev_id;
list_del(&arvif->list);
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
vif->addr);
if (ret)
- ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
+ ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
@@ -3472,7 +4443,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
/* Some firmware revisions don't notify host about self-peer removal
* until after associated vdev is deleted.
*/
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
vif->addr);
if (ret)
@@ -3486,6 +4458,17 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_peer_cleanup(ar, arvif->vdev_id);
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = NULL;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_mac_vif_tx_unlock_all(arvif);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -3493,8 +4476,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
* FIXME: Has to be verified.
*/
#define SUPPORTED_FILTERS \
- (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+ (FIF_ALLMULTI | \
FIF_CONTROL | \
FIF_PSPOLL | \
FIF_OTHER_BSS | \
@@ -3615,6 +4597,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (ret)
ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
+
+ vdev_param = ar->wmi.vdev_param->protection_mode;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ info->use_cts_prot ? 1 : 0);
+ if (ret)
+ ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
+ info->use_cts_prot, arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -3791,10 +4780,14 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
* frames with multi-vif APs. This is not required for main firmware
* branch (e.g. 636).
*
- * FIXME: This has been tested only in AP. It remains unknown if this
- * is required for multi-vif STA interfaces on 10.1 */
+ * This is also needed for 636 fw for IBSS-RSN to work more reliably.
+ *
+ * FIXME: It remains unknown if this is required for multi-vif STA
+ * interfaces on 10.1.
+ */
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
return;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
@@ -3826,8 +4819,14 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
const u8 *peer_addr;
bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104;
- bool def_idx = false;
int ret = 0;
+ int ret2;
+ u32 flags = 0;
+ u32 flags2;
+
+ /* this one needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ return 1;
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
@@ -3843,6 +4842,13 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->hw_key_idx = key->keyidx;
+ if (is_wep) {
+ if (cmd == SET_KEY)
+ arvif->wep_keys[key->keyidx] = key;
+ else
+ arvif->wep_keys[key->keyidx] = NULL;
+ }
+
/* the peer should not disappear in mid-way (unless FW goes awry) since
* we already hold conf_mutex. we just make sure its there now. */
spin_lock_bh(&ar->data_lock);
@@ -3862,30 +4868,61 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
}
- if (is_wep) {
- if (cmd == SET_KEY)
- arvif->wep_keys[key->keyidx] = key;
- else
- arvif->wep_keys[key->keyidx] = NULL;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+ if (is_wep) {
if (cmd == DISABLE_KEY)
ath10k_clear_vdev_key(arvif, key);
- }
- /* set TX_USAGE flag for all the keys incase of dot1x-WEP. For
- * static WEP, do not set this flag for the keys whose key id
- * is greater than default key id.
- */
- if (arvif->def_wep_key_idx == -1)
- def_idx = true;
+ /* When WEP keys are uploaded it's possible that stations are
+ * already associated (e.g. after an IBSS merge) without any
+ * keys. Static WEP needs an explicit per-peer key upload.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ cmd == SET_KEY)
+ ath10k_mac_vif_update_wep_key(arvif, key);
+
+ /* 802.1x never sets the def_wep_key_idx so each set_key()
+ * call changes the default tx key.
+ *
+ * Static WEP sets def_wep_key_idx via .set_default_unicast_key
+ * after the first set_key().
+ */
+ if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
+ flags |= WMI_KEY_TX_USAGE;
+ }
- ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
}
+ /* mac80211 sets static WEP keys as groupwise while firmware requires
+ * them to be installed twice as both pairwise and groupwise.
+ */
+ if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
+ flags2 = flags;
+ flags2 &= ~WMI_KEY_GROUP;
+ flags2 |= WMI_KEY_PAIRWISE;
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
+ if (ret) {
+ ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
+ ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
+ peer_addr, flags);
+ if (ret2)
+ ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret2);
+ goto exit;
+ }
+ }
+
ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
spin_lock_bh(&ar->data_lock);
@@ -3933,6 +4970,7 @@ static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
}
arvif->def_wep_key_idx = keyidx;
+
unlock:
mutex_unlock(&arvif->ar->conf_mutex);
}
@@ -3943,6 +4981,10 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
struct ath10k_vif *arvif;
struct ath10k_sta *arsta;
struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
u32 changed, bw, nss, smps;
int err;
@@ -3951,6 +4993,13 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
arvif = arsta->arvif;
ar = arvif->ar;
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
spin_lock_bh(&ar->data_lock);
changed = arsta->changed;
@@ -3964,6 +5013,10 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
mutex_lock(&ar->conf_mutex);
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
if (changed & IEEE80211_RC_BW_CHANGED) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
sta->addr, bw);
@@ -4011,14 +5064,14 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
mutex_unlock(&ar->conf_mutex);
}
-static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return 0;
if (ar->num_stations >= ar->max_num_stations)
@@ -4029,19 +5082,72 @@ static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
return 0;
}
-static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return;
ar->num_stations--;
}
+struct ath10k_mac_tdls_iter_data {
+ u32 num_tdls_stations;
+ struct ieee80211_vif *curr_vif;
+};
+
+static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_mac_tdls_iter_data *iter_data = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+ if (sta->tdls && sta_vif == iter_data->curr_vif)
+ iter_data->num_tdls_stations++;
+}
+
+static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_tdls_iter_data data = {};
+
+ data.curr_vif = vif;
+
+ ieee80211_iterate_stations_atomic(hw,
+ ath10k_mac_tdls_vif_stations_count_iter,
+ &data);
+ return data.num_tdls_stations;
+}
+
+static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int *num_tdls_vifs = data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
+ (*num_tdls_vifs)++;
+}
+
+static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
+{
+ int num_tdls_vifs = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_tdls_vifs_count_iter,
+ &num_tdls_vifs);
+ return num_tdls_vifs;
+}
+
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4072,41 +5178,80 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
+ enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
+ u32 num_tdls_stations;
+ u32 num_tdls_vifs;
+
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
arvif->vdev_id, sta->addr,
ar->num_stations + 1, ar->max_num_stations,
ar->num_peers + 1, ar->max_num_peers);
- ret = ath10k_mac_inc_num_stations(arvif);
+ ret = ath10k_mac_inc_num_stations(arvif, sta);
if (ret) {
ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
ar->max_num_stations);
goto exit;
}
- ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+ if (sta->tdls)
+ peer_type = WMI_PEER_TYPE_TDLS;
+
+ ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
+ peer_type);
if (ret) {
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
- ath10k_mac_dec_num_stations(arvif);
+ ath10k_mac_dec_num_stations(arvif, sta);
goto exit;
}
- if (vif->type == NL80211_IFTYPE_STATION) {
- WARN_ON(arvif->is_started);
+ if (!sta->tdls)
+ goto exit;
+
+ num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
+ num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
- ret = ath10k_vdev_start(arvif);
+ if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
+ num_tdls_stations == 0) {
+ ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
+ arvif->vdev_id, ar->max_num_tdls_vdevs);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ if (num_tdls_stations == 0) {
+ /* This is the first tdls peer in the current vif */
+ enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
+
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ state);
if (ret) {
- ath10k_warn(ar, "failed to start vdev %i: %d\n",
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
arvif->vdev_id, ret);
- WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
- sta->addr));
- ath10k_mac_dec_num_stations(arvif);
+ ath10k_peer_delete(ar, arvif->vdev_id,
+ sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
goto exit;
}
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_PEERING);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
- arvif->is_started = true;
+ if (num_tdls_stations != 0)
+ goto exit;
+ ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
}
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
@@ -4117,23 +5262,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
- if (vif->type == NL80211_IFTYPE_STATION) {
- WARN_ON(!arvif->is_started);
-
- ret = ath10k_vdev_stop(arvif);
- if (ret)
- ath10k_warn(ar, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
- arvif->is_started = false;
- }
-
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
- ath10k_mac_dec_num_stations(arvif);
+ ath10k_mac_dec_num_stations(arvif, sta);
+
+ if (!sta->tdls)
+ goto exit;
+
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif))
+ goto exit;
+
+ /* This was the last tdls peer in the current vif */
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+ arvif->vdev_id, ret);
+ }
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -4149,9 +5297,30 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
+ new_state == IEEE80211_STA_AUTHORIZED &&
+ sta->tdls) {
+ /*
+ * Tdls station authorized.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
+ sta->addr);
+
+ ret = ath10k_station_assoc(ar, vif, sta, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto exit;
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_CONNECTED);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
/*
* Disassociation.
*/
@@ -4356,6 +5525,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_start_scan_arg arg;
int ret = 0;
+ u32 scan_time_msec;
mutex_lock(&ar->conf_mutex);
@@ -4382,7 +5552,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
if (ret)
goto exit;
- duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
memset(&arg, 0, sizeof(arg));
ath10k_wmi_start_scan_init(ar, &arg);
@@ -4390,11 +5560,12 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
arg.scan_id = ATH10K_SCAN_ID;
arg.n_channels = 1;
arg.channels[0] = chan->center_freq;
- arg.dwell_time_active = duration;
- arg.dwell_time_passive = duration;
- arg.max_scan_time = 2 * duration;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg.burst_duration_ms = duration;
ret = ath10k_start_scan(ar, &arg);
if (ret) {
@@ -4417,6 +5588,9 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
goto exit;
}
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
ret = 0;
exit:
mutex_unlock(&ar->conf_mutex);
@@ -4512,70 +5686,6 @@ static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
return 1;
}
-#ifdef CONFIG_PM
-static int ath10k_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan)
-{
- struct ath10k *ar = hw->priv;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
- if (ret) {
- if (ret == -ETIMEDOUT)
- goto resume;
- ret = 1;
- goto exit;
- }
-
- ret = ath10k_hif_suspend(ar);
- if (ret) {
- ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
- goto resume;
- }
-
- ret = 0;
- goto exit;
-resume:
- ret = ath10k_wmi_pdev_resume_target(ar);
- if (ret)
- ath10k_warn(ar, "failed to resume target: %d\n", ret);
-
- ret = 1;
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static int ath10k_resume(struct ieee80211_hw *hw)
-{
- struct ath10k *ar = hw->priv;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- ret = ath10k_hif_resume(ar);
- if (ret) {
- ath10k_warn(ar, "failed to resume hif: %d\n", ret);
- ret = 1;
- goto exit;
- }
-
- ret = ath10k_wmi_pdev_resume_target(ar);
- if (ret) {
- ath10k_warn(ar, "failed to resume target: %d\n", ret);
- ret = 1;
- goto exit;
- }
-
- ret = 0;
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-#endif
-
static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
@@ -4635,343 +5745,286 @@ exit:
return ret;
}
-/* Helper table for legacy fixed_rate/bitrate_mask */
-static const u8 cck_ofdm_rate[] = {
- /* CCK */
- 3, /* 1Mbps */
- 2, /* 2Mbps */
- 1, /* 5.5Mbps */
- 0, /* 11Mbps */
- /* OFDM */
- 3, /* 6Mbps */
- 7, /* 9Mbps */
- 2, /* 12Mbps */
- 6, /* 18Mbps */
- 1, /* 24Mbps */
- 5, /* 36Mbps */
- 0, /* 48Mbps */
- 4, /* 54Mbps */
-};
-
-/* Check if only one bit set */
-static int ath10k_check_single_mask(u32 mask)
-{
- int bit;
-
- bit = ffs(mask);
- if (!bit)
- return 0;
-
- mask &= ~BIT(bit - 1);
- if (mask)
- return 2;
-
- return 1;
-}
-
static bool
-ath10k_default_bitrate_mask(struct ath10k *ar,
- enum ieee80211_band band,
- const struct cfg80211_bitrate_mask *mask)
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
{
- u32 legacy = 0x00ff;
- u8 ht = 0xff, i;
- u16 vht = 0x3ff;
- u16 nrf = ar->num_rf_chains;
+ int num_rates = 0;
+ int i;
- if (ar->cfg_tx_chainmask)
- nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+ num_rates += hweight32(mask->control[band].legacy);
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- legacy = 0x00fff;
- vht = 0;
- break;
- case IEEE80211_BAND_5GHZ:
- break;
- default:
- return false;
- }
-
- if (mask->control[band].legacy != legacy)
- return false;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight8(mask->control[band].ht_mcs[i]);
- for (i = 0; i < nrf; i++)
- if (mask->control[band].ht_mcs[i] != ht)
- return false;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
- for (i = 0; i < nrf; i++)
- if (mask->control[band].vht_mcs[i] != vht)
- return false;
-
- return true;
+ return num_rates == 1;
}
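+
+/* Editor's worked example (illustrative): a mask selecting only HT MCS 4
+ * on the first stream has legacy == 0, ht_mcs = { 0x10, 0, 0, 0 } and an
+ * all-zero vht_mcs; the hweight sums add up to exactly 1, so the mask is
+ * treated as a single fixed rate.
+ */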
static bool
-ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- u8 *fixed_nss)
-{
- int ht_nss = 0, vht_nss = 0, i;
+ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
- /* check legacy */
- if (ath10k_check_single_mask(mask->control[band].legacy))
+ if (mask->control[band].legacy)
return false;
- /* check HT */
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
- if (mask->control[band].ht_mcs[i] == 0xff)
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
continue;
- else if (mask->control[band].ht_mcs[i] == 0x00)
- break;
-
- return false;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
}
- ht_nss = i;
-
- /* check VHT */
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
- if (mask->control[band].vht_mcs[i] == 0x03ff)
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
continue;
- else if (mask->control[band].vht_mcs[i] == 0x0000)
- break;
-
- return false;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
}
- vht_nss = i;
-
- if (ht_nss > 0 && vht_nss > 0)
+ if (ht_nss_mask != vht_nss_mask)
return false;
- if (ht_nss)
- *fixed_nss = ht_nss;
- else if (vht_nss)
- *fixed_nss = vht_nss;
- else
+ if (ht_nss_mask == 0)
return false;
- return true;
-}
-
-static bool
-ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- enum wmi_rate_preamble *preamble)
-{
- int legacy = 0, ht = 0, vht = 0, i;
-
- *preamble = WMI_RATE_PREAMBLE_OFDM;
-
- /* check legacy */
- legacy = ath10k_check_single_mask(mask->control[band].legacy);
- if (legacy > 1)
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
return false;
- /* check HT */
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
- if (ht > 1)
- return false;
-
- /* check VHT */
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
- vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
- if (vht > 1)
- return false;
-
- /* Currently we support only one fixed_rate */
- if ((legacy + ht + vht) != 1)
- return false;
-
- if (ht)
- *preamble = WMI_RATE_PREAMBLE_HT;
- else if (vht)
- *preamble = WMI_RATE_PREAMBLE_VHT;
+ *nss = fls(ht_nss_mask);
return true;
}
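
The BIT(fls(ht_nss_mask)) - 1 comparison above rejects NSS bitmaps with holes: it rebuilds a mask with every bit below the highest set bit filled in and demands equality. A small sketch of the check, assuming the mask occupies at most the 8 low bits as the per-chain masks here do:

    #include <stdbool.h>
    #include <stdint.h>

    /* fls()-style helper: 1-based index of the highest set bit, 0 if none. */
    static int fls32(uint32_t x)
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    /* True only for 0b1, 0b11, 0b111, ...: bits contiguous from bit 0.
     * E.g. 0b101 (chains 1 and 3 but not 2) is rejected.
     */
    static bool nss_mask_is_contiguous(uint32_t mask)
    {
            if (!mask)
                    return false;
            return ((1u << fls32(mask)) - 1) == mask;
    }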
-static bool
-ath10k_bitrate_mask_rate(struct ath10k *ar,
- const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- u8 *fixed_rate,
- u8 *fixed_nss)
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u8 *rate, u8 *nss)
{
- u8 rate = 0, pream = 0, nss = 0, i;
- enum wmi_rate_preamble preamble;
-
- /* Check if single rate correct */
- if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
- return false;
-
- pream = preamble;
-
- switch (preamble) {
- case WMI_RATE_PREAMBLE_CCK:
- case WMI_RATE_PREAMBLE_OFDM:
- i = ffs(mask->control[band].legacy) - 1;
-
- if (band == IEEE80211_BAND_2GHZ && i < 4)
- pream = WMI_RATE_PREAMBLE_CCK;
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ int rate_idx;
+ int i;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
- if (band == IEEE80211_BAND_5GHZ)
- i += 4;
+ if (hweight32(mask->control[band].legacy) == 1) {
+ rate_idx = ffs(mask->control[band].legacy) - 1;
- if (i >= ARRAY_SIZE(cck_ofdm_rate))
- return false;
+ hw_rate = sband->bitrates[rate_idx].hw_value;
+ bitrate = sband->bitrates[rate_idx].bitrate;
- rate = cck_ofdm_rate[i];
- break;
- case WMI_RATE_PREAMBLE_HT:
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- if (mask->control[band].ht_mcs[i])
- break;
-
- if (i == IEEE80211_HT_MCS_MASK_LEN)
- return false;
-
- rate = ffs(mask->control[band].ht_mcs[i]) - 1;
- nss = i;
- break;
- case WMI_RATE_PREAMBLE_VHT:
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
- if (mask->control[band].vht_mcs[i])
- break;
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
- if (i == NL80211_VHT_NSS_MAX)
- return false;
+ *nss = 1;
+ *rate = preamble << 6 |
+ (*nss - 1) << 4 |
+ hw_rate << 0;
- rate = ffs(mask->control[band].vht_mcs[i]) - 1;
- nss = i;
- break;
+ return 0;
}
- *fixed_nss = nss + 1;
- nss <<= 4;
- pream <<= 6;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_HT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].ht_mcs[i]) - 1);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
- pream, nss, rate);
-
- *fixed_rate = pream | nss | rate;
+ return 0;
+ }
+ }
- return true;
-}
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_VHT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].vht_mcs[i]) - 1);
-static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
- const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- u8 *fixed_rate,
- u8 *fixed_nss)
-{
- /* First check full NSS mask, if we can simply limit NSS */
- if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
- return true;
+ return 0;
+ }
+ }
- /* Next Check single rate is set */
- return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
+ return -EINVAL;
}
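
The returned rate value packs three fields into one byte: preamble in bits 7-6, NSS minus one in bits 5-4 and the rate/MCS index in bits 3-0. A worked sketch; the preamble constants mirror the driver's wmi_rate_preamble enum and should be treated as assumptions here:

    #include <stdint.h>
    #include <stdio.h>

    enum { PRE_OFDM = 0, PRE_CCK = 1, PRE_HT = 2, PRE_VHT = 3 };

    static uint8_t pack_fixed_rate(uint8_t preamble, uint8_t nss, uint8_t mcs)
    {
            return (uint8_t)(preamble << 6 | (nss - 1) << 4 | (mcs & 0xf));
    }

    int main(void)
    {
            /* VHT MCS 7 at 2 spatial streams: 0xc0 | 0x10 | 0x07 = 0xd7 */
            printf("0x%02x\n", pack_fixed_rate(PRE_VHT, 2, 7));
            return 0;
    }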
-static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
- u8 fixed_rate,
- u8 fixed_nss,
- u8 force_sgi)
+static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
+ u8 rate, u8 nss, u8 sgi)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
- int ret = 0;
-
- mutex_lock(&ar->conf_mutex);
-
- if (arvif->fixed_rate == fixed_rate &&
- arvif->fixed_nss == fixed_nss &&
- arvif->force_sgi == force_sgi)
- goto exit;
+ int ret;
- if (fixed_rate == WMI_FIXED_RATE_NONE)
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+ lockdep_assert_held(&ar->conf_mutex);
- if (force_sgi)
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+ arvif->vdev_id, rate, nss, sgi);
vdev_param = ar->wmi.vdev_param->fixed_rate;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- vdev_param, fixed_rate);
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
if (ret) {
ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
- fixed_rate, ret);
- ret = -EINVAL;
- goto exit;
+ rate, ret);
+ return ret;
}
- arvif->fixed_rate = fixed_rate;
-
vdev_param = ar->wmi.vdev_param->nss;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- vdev_param, fixed_nss);
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
+ return ret;
+ }
+ vdev_param = ar->wmi.vdev_param->sgi;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
if (ret) {
- ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
- fixed_nss, ret);
- ret = -EINVAL;
- goto exit;
+ ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
+ return ret;
}
- arvif->fixed_nss = fixed_nss;
+ return 0;
+}
- vdev_param = ar->wmi.vdev_param->sgi;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- force_sgi);
+static bool
+ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
- if (ret) {
- ath10k_warn(ar, "failed to set sgi param %d: %d\n",
- force_sgi, ret);
- ret = -EINVAL;
- goto exit;
+ /* Due to a firmware limitation in WMI_PEER_ASSOC_CMDID it is
+ * impossible to express every VHT MCS rate mask. Effectively only the
+ * following ranges can be used: none, 0-7, 0-8 and 0-9.
+ */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
+ return false;
+ }
}
- arvif->force_sgi = force_sgi;
+ return true;
+}
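
BIT(8)-1, BIT(9)-1 and BIT(10)-1 are 0x00ff, 0x01ff and 0x03ff, i.e. precisely the contiguous MCS 0-7, 0-8 and 0-9 ranges named in the comment. A compact standalone version of the validation:

    #include <stdbool.h>
    #include <stdint.h>

    /* Firmware can express only VHT MCS masks of none, 0-7, 0-8 or 0-9. */
    static bool vht_mcs_mask_ok(uint16_t mcs_mask)
    {
            switch (mcs_mask) {
            case 0x0000:        /* no VHT rates for this NSS */
            case 0x00ff:        /* MCS 0-7 */
            case 0x01ff:        /* MCS 0-8 */
            case 0x03ff:        /* MCS 0-9 */
                    return true;
            default:
                    return false;
            }
    }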
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
+static void ath10k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arvif->ar;
+
+ if (arsta->arvif != arvif)
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
}
-static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const struct cfg80211_bitrate_mask *mask)
+static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
struct ath10k *ar = arvif->ar;
- enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
- u8 fixed_rate = WMI_FIXED_RATE_NONE;
- u8 fixed_nss = ar->num_rf_chains;
- u8 force_sgi;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u8 rate;
+ u8 nss;
+ u8 sgi;
+ int single_nss;
+ int ret;
- if (ar->cfg_tx_chainmask)
- fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+ if (ath10k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
- force_sgi = mask->control[band].gi;
- if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
- if (!ath10k_default_bitrate_mask(ar, band, mask)) {
- if (!ath10k_get_fixed_rate_nss(ar, mask, band,
- &fixed_rate,
- &fixed_nss))
+ if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
+ ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &rate, &nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min(ar->num_rf_chains,
+ max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath10k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
}
- if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
- ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
- return -EINVAL;
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi);
+ if (ret) {
+ ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
}
- return ath10k_set_fixed_rate_param(arvif, fixed_rate,
- fixed_nss, force_sgi);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
}
static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
@@ -5090,6 +6143,286 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw,
return -EINVAL;
}
+static void
+ath10k_mac_update_rx_channel(struct ath10k *ar)
+{
+ struct cfg80211_chan_def *def = NULL;
+
+ /* Both locks must be held when ar->rx_channel is modified so that
+ * readers can hold either one.
+ */
+ lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_held(&ar->data_lock);
+
+ /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
+ * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
+ * ppdu on Rx may reduce performance on low-end systems. It should be
+ * possible to use tables/hashmaps to speed the lookup up (but be wary
+ * of CPU data cache line sizes), though to keep the initial
+ * implementation simple and less intrusive, fall back to the slow
+ * lookup only for multi-channel cases. Single-channel cases will keep
+ * using the old channel derivation, so performance should not be
+ * affected much.
+ */
+ rcu_read_lock();
+ if (ath10k_mac_num_chanctxs(ar) == 1) {
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &def);
+ ar->rx_channel = def->chan;
+ } else {
+ ar->rx_channel = NULL;
+ }
+ rcu_read_unlock();
+}
+
+static void
+ath10k_mac_chan_ctx_init(struct ath10k *ar,
+ struct ath10k_chanctx *arctx,
+ struct ieee80211_chanctx_conf *conf)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_held(&ar->data_lock);
+
+ memset(arctx, 0, sizeof(*arctx));
+
+ arctx->conf = *conf;
+}
+
+static int
+ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx add freq %hu width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_chan_ctx_init(ar, arctx, ctx);
+ ath10k_mac_update_rx_channel(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void
+ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx remove freq %hu width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_update_rx_channel(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx change freq %hu->%hu width %d->%d ptr %p changed %x\n",
+ arctx->conf.def.chan->center_freq,
+ ctx->def.chan->center_freq,
+ arctx->conf.def.width, ctx->def.width,
+ ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ spin_lock_bh(&ar->data_lock);
+ arctx->conf = *ctx;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+
+ /* FIXME: How to configure Rx chains properly? */
+
+ /* No other actions are actually necessary. Firmware maintains channel
+ * definitions per vdev internally and there's no host-side channel
+ * context abstraction to configure, e.g. channel width.
+ */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx assign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (WARN_ON(arvif->is_started)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EBUSY;
+ }
+
+ ret = ath10k_vdev_start(arvif, &arctx->conf.def);
+ if (ret) {
+ ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ arctx->conf.def.chan->center_freq, ret);
+ goto err;
+ }
+
+ arvif->is_started = true;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_stop;
+ }
+
+ arvif->is_up = true;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_stop:
+ ath10k_vdev_stop(arvif);
+ arvif->is_started = false;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void
+ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx unassign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ WARN_ON(!arvif->is_up);
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ }
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+ struct ath10k_chanctx *arctx_new, *arctx_old;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < n_vifs; i++) {
+ arvif = ath10k_vif_to_arvif(vifs[i].vif);
+ arctx_new = (void *)vifs[i].new_ctx->drv_priv;
+ arctx_old = (void *)vifs[i].old_ctx->drv_priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d ptr %p->%p\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width,
+ arctx_old, arctx_new);
+
+ if (mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
+ ath10k_mac_chan_ctx_init(ar, arctx_new,
+ vifs[i].new_ctx);
+ }
+
+ arctx_new->conf = *vifs[i].new_ctx;
+
+ /* FIXME: ath10k_mac_chan_reconfigure() uses current, i.e. not
+ * yet updated chanctx_conf pointer.
+ */
+ arctx_old->conf = *vifs[i].new_ctx;
+ }
+ ath10k_mac_update_rx_channel(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* FIXME: Reconfigure only affected vifs */
+ ath10k_mac_chan_reconfigure(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
@@ -5114,31 +6447,31 @@ static const struct ieee80211_ops ath10k_ops = {
.get_antenna = ath10k_get_antenna,
.reconfig_complete = ath10k_reconfig_complete,
.get_survey = ath10k_get_survey,
- .set_bitrate_mask = ath10k_set_bitrate_mask,
+ .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
.ampdu_action = ath10k_ampdu_action,
.get_et_sset_count = ath10k_debug_get_et_sset_count,
.get_et_stats = ath10k_debug_get_et_stats,
.get_et_strings = ath10k_debug_get_et_strings,
+ .add_chanctx = ath10k_mac_op_add_chanctx,
+ .remove_chanctx = ath10k_mac_op_remove_chanctx,
+ .change_chanctx = ath10k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
#ifdef CONFIG_PM
- .suspend = ath10k_suspend,
- .resume = ath10k_resume,
+ .suspend = ath10k_wow_op_suspend,
+ .resume = ath10k_wow_op_resume,
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
#endif
};
-#define RATETAB_ENT(_rate, _rateid, _flags) { \
- .bitrate = (_rate), \
- .flags = (_flags), \
- .hw_value = (_rateid), \
-}
-
#define CHAN2G(_channel, _freq, _flags) { \
.band = IEEE80211_BAND_2GHZ, \
.hw_value = (_channel), \
@@ -5194,6 +6527,7 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
CHAN5G(132, 5660, 0),
CHAN5G(136, 5680, 0),
CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
CHAN5G(149, 5745, 0),
CHAN5G(153, 5765, 0),
CHAN5G(157, 5785, 0),
@@ -5201,31 +6535,6 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
CHAN5G(165, 5825, 0),
};
-/* Note: Be careful if you re-order these. There is code which depends on this
- * ordering.
- */
-static struct ieee80211_rate ath10k_rates[] = {
- /* CCK */
- RATETAB_ENT(10, 0x82, 0),
- RATETAB_ENT(20, 0x84, 0),
- RATETAB_ENT(55, 0x8b, 0),
- RATETAB_ENT(110, 0x96, 0),
- /* OFDM */
- RATETAB_ENT(60, 0x0c, 0),
- RATETAB_ENT(90, 0x12, 0),
- RATETAB_ENT(120, 0x18, 0),
- RATETAB_ENT(180, 0x24, 0),
- RATETAB_ENT(240, 0x30, 0),
- RATETAB_ENT(360, 0x48, 0),
- RATETAB_ENT(480, 0x60, 0),
- RATETAB_ENT(540, 0x6c, 0),
-};
-
-#define ath10k_a_rates (ath10k_rates + 4)
-#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
-#define ath10k_g_rates (ath10k_rates + 0)
-#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
-
struct ath10k *ath10k_mac_create(size_t priv_size)
{
struct ieee80211_hw *hw;
@@ -5299,15 +6608,92 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
},
};
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC),
+ },
+};
+
+/* FIXME: This is not thoroughly tested. These combinations may over- or
+ * underestimate hw/fw capabilities.
+ */
+static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
+static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 2,
+ .max_interfaces = 3,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 mcs_map;
+ u32 val;
int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->vht_cap_info;
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+ val = ar->num_rf_chains - 1;
+ val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ val = ar->num_rf_chains - 1;
+ val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
mcs_map = 0;
for (i = 0; i < 8; i++) {
if (i < ar->num_rf_chains)
@@ -5438,6 +6824,10 @@ int ath10k_mac_register(struct ath10k *ar)
ht_cap = ath10k_get_ht_cap(ar);
vht_cap = ath10k_create_vht_cap(ar);
+ BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
+ ARRAY_SIZE(ath10k_5ghz_channels)) !=
+ ATH10K_NUM_CHANS);
+
if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
channels = kmemdup(ath10k_2ghz_channels,
sizeof(ath10k_2ghz_channels),
@@ -5500,9 +6890,16 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SW_CRYPTO_CONTROL;
+ IEEE80211_HW_SW_CRYPTO_CONTROL |
+ IEEE80211_HW_SUPPORT_FAST_XMIT |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_SUPPORTS_PER_STA_GTK |
+ IEEE80211_HW_WANT_MONITOR_VIF |
+ IEEE80211_HW_CHANCTX_STA_CSA |
+ IEEE80211_HW_QUEUE_CONTROL;
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
@@ -5517,6 +6914,7 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+ ar->hw->chanctx_data_size = sizeof(struct ath10k_chanctx);
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
@@ -5533,6 +6931,9 @@ int ath10k_mac_register(struct ath10k *ar)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
}
+ if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
+ ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
@@ -5540,20 +6941,46 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ret = ath10k_wow_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init wow: %d\n", ret);
+ goto err_free;
+ }
+
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
*/
- ar->hw->queues = 4;
+ ar->hw->queues = IEEE80211_MAX_QUEUES;
+
+ /* vdev_ids are used as hw queue numbers. Make sure the offchannel tx
+ * queue uses a number that vdev ids can never reach so that it is not
+ * stopped accidentally.
+ */
+ ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
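
Since vdev ids double as hw queue numbers here, the invariant is that no vdev id may ever equal the off-channel queue index. A sketch of that invariant, using 16 as a stand-in for IEEE80211_MAX_QUEUES (an assumption for illustration):

    #include <assert.h>

    #define MAX_QUEUES    16                  /* stand-in value */
    #define OFFCHAN_QUEUE (MAX_QUEUES - 1)

    static void check_queue_mapping(int vdev_id)
    {
            /* vdev ids map 1:1 onto hw queues; aliasing the off-channel
             * queue would let a per-vdev queue stop also stall
             * off-channel tx.
             */
            assert(vdev_id >= 0 && vdev_id < OFFCHAN_QUEUE);
    }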
switch (ar->wmi.op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
- case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->hw->wiphy->iface_combinations = ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_if_comb);
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ar->hw->wiphy->iface_combinations =
+ ath10k_tlv_qcs_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
+ } else {
+ ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_if_comb);
+ }
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 68296117d203..b291f063705c 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -23,11 +23,22 @@
#define WEP_KEYID_SHIFT 6
+enum wmi_tlv_tx_pause_id;
+enum wmi_tlv_tx_pause_action;
+
struct ath10k_generic_iter {
struct ath10k *ar;
int ret;
};
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
struct ath10k *ath10k_mac_create(size_t priv_size);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
@@ -45,6 +56,24 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
void ath10k_drain_tx(struct ath10k *ar);
bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
u8 keyidx);
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def);
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action);
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate);
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c
new file mode 100644
index 000000000000..c0b6ffaf3ec1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow_oppps = noa->ctwindow_oppps;
+ u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+ bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = noa->num_descriptors;
+ int i;
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = noa->index;
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+
+ if (!noa->num_descriptors &&
+ !(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
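
The computed length is a fixed 11-byte overhead plus 13 bytes per descriptor (struct ieee80211_p2p_noa_desc packs a u8 count and three __le32 fields); one descriptor therefore yields a 24-byte IE. A standalone sketch of the same arithmetic:

    #include <stddef.h>

    #define NOA_DESC_SIZE 13    /* u8 count + 3 x __le32, packed */

    static size_t p2p_noa_ie_len(unsigned int num_desc, int oppps_enabled)
    {
            if (!num_desc && !oppps_enabled)
                    return 0;   /* nothing to advertise, emit no IE */

            return 2            /* EID + length byte */
                 + 4            /* WFA OUI + OUI type */
                 + 3            /* attribute id + 2-byte attribute length */
                 + 2            /* index + oppps/ctwindow */
                 + num_desc * NOA_DESC_SIZE;
    }
    /* p2p_noa_ie_len(1, 0) == 24 for a single-descriptor NoA IE. */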
+
+static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath10k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath10k_p2p_noa_ie_fill(ie, len, noa);
+ ath10k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath10k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+struct ath10k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct wmi_p2p_noa_info *noa;
+};
+
+static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath10k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_p2p_noa_update_vdev_iter,
+ &arg);
+}
diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h
new file mode 100644
index 000000000000..7be616e2e121
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _P2P_H
+#define _P2P_H
+
+struct ath10k_vif;
+struct wmi_p2p_noa_info;
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa);
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 7681237fe298..17a060e8efa2 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -113,7 +113,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
- .dest_nentries = 32,
+ .dest_nentries = 128,
},
/* CE3: host->target WMI */
@@ -183,7 +183,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
+ .nentries = __cpu_to_le32(64),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
@@ -330,6 +330,205 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
},
};
+static bool ath10k_pci_is_awake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ RTC_STATE_ADDRESS);
+
+ return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+}
+
+static void __ath10k_pci_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ lockdep_assert_held(&ar_pci->ps_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+}
+
+static void __ath10k_pci_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ lockdep_assert_held(&ar_pci->ps_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ ar_pci->ps_awake = false;
+}
+
+static int ath10k_pci_wake_wait(struct ath10k *ar)
+{
+ int tot_delay = 0;
+ int curr_delay = 5;
+
+ while (tot_delay < PCIE_WAKE_TIMEOUT) {
+ if (ath10k_pci_is_awake(ar))
+ return 0;
+
+ udelay(curr_delay);
+ tot_delay += curr_delay;
+
+ if (curr_delay < 50)
+ curr_delay += 5;
+ }
+
+ return -ETIMEDOUT;
+}
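
The polling loop backs off linearly: 5 us, 10 us, and so on up to a 50 us ceiling, so an early wakeup is caught within microseconds while a slow one is polled cheaply until the timeout. A standalone sketch of the schedule; the timeout constant is an assumption, not the driver's PCIE_WAKE_TIMEOUT value:

    #include <stdio.h>

    #define WAKE_TIMEOUT_US 30000   /* illustrative stand-in */

    int main(void)
    {
            int tot = 0, cur = 5, polls = 0;

            while (tot < WAKE_TIMEOUT_US) {
                    polls++;            /* one is-awake register check */
                    tot += cur;
                    if (cur < 50)
                            cur += 5;
            }
            /* first 10 polls cover 5+10+...+50 = 275 us, then 50 us/poll */
            printf("%d polls to span %d us\n", polls, WAKE_TIMEOUT_US);
            return 0;
    }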
+
+static int ath10k_pci_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ /* This function can be called very frequently. To avoid excessive
+ * CPU stalls from MMIO reads, use a cached variable to hold the
+ * device state.
+ */
+ if (!ar_pci->ps_awake) {
+ __ath10k_pci_wake(ar);
+
+ ret = ath10k_pci_wake_wait(ar);
+ if (ret == 0)
+ ar_pci->ps_awake = true;
+ }
+
+ if (ret == 0) {
+ ar_pci->ps_wake_refcount++;
+ WARN_ON(ar_pci->ps_wake_refcount == 0);
+ }
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+ return ret;
+}
+
+static void ath10k_pci_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ if (WARN_ON(ar_pci->ps_wake_refcount == 0))
+ goto skip;
+
+ ar_pci->ps_wake_refcount--;
+
+ mod_timer(&ar_pci->ps_timer, jiffies +
+ msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
+
+skip:
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_ps_timer(unsigned long ptr)
+{
+ struct ath10k *ar = (void *)ptr;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ if (ar_pci->ps_wake_refcount > 0)
+ goto skip;
+
+ __ath10k_pci_sleep(ar);
+
+skip:
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_sleep_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ del_timer_sync(&ar_pci->ps_timer);
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+ WARN_ON(ar_pci->ps_wake_refcount > 0);
+ __ath10k_pci_sleep(ar);
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
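
Taken together these helpers implement a reference-counted wakelock with deferred release: wake() powers the device up on the 0->1 transition, sleep() drops a reference and re-arms the grace timer, and the timer callback powers down only if no references were re-taken in the meantime. A compressed sketch of the state machine, with the locking (ps_lock in the driver) elided:

    #include <stdbool.h>

    struct ps_state {
            unsigned long wake_refcount;
            bool awake;
    };

    static void ps_wake(struct ps_state *ps)
    {
            if (!ps->awake) {
                    /* poke SOC_WAKE, poll until the RTC reports ON */
                    ps->awake = true;
            }
            ps->wake_refcount++;
    }

    static void ps_sleep(struct ps_state *ps)
    {
            ps->wake_refcount--;
            /* don't doze yet: (re)arm the grace timer instead */
    }

    static void ps_timer_fired(struct ps_state *ps)
    {
            if (ps->wake_refcount > 0)
                    return;             /* device was woken again */
            /* write SOC_WAKE_RESET: device may now doze */
            ps->awake = false;
    }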
+
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
+ value, offset, ret);
+ return;
+ }
+
+ iowrite32(value, ar_pci->mem + offset);
+ ath10k_pci_sleep(ar);
+}
+
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 val;
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
+ offset, ret);
+ return 0xffffffff;
+ }
+
+ val = ioread32(ar_pci->mem + offset);
+ ath10k_pci_sleep(ar);
+
+ return val;
+}
+
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
+}
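
With the accessors now out of line, every MMIO access transparently brackets itself with wake/sleep, so callers never manage device power explicitly. A hypothetical helper built on the new API (not part of this patch) showing the intended usage:

    /* Hypothetical read-modify-write helper. Each accessor call wakes
     * the target, performs the access and re-arms the sleep grace
     * timer, so no explicit ath10k_pci_wake()/ath10k_pci_sleep() pair
     * is needed around it.
     */
    static void ath10k_pci_set_bits32(struct ath10k *ar, u32 offset, u32 bits)
    {
            u32 val;

            val = ath10k_pci_read32(ar, offset);
            ath10k_pci_write32(ar, offset, val | bits);
    }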
+
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
u32 cause;
@@ -793,45 +992,6 @@ static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
-static bool ath10k_pci_is_awake(struct ath10k *ar)
-{
- u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
-
- return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
-}
-
-static int ath10k_pci_wake_wait(struct ath10k *ar)
-{
- int tot_delay = 0;
- int curr_delay = 5;
-
- while (tot_delay < PCIE_WAKE_TIMEOUT) {
- if (ath10k_pci_is_awake(ar))
- return 0;
-
- udelay(curr_delay);
- tot_delay += curr_delay;
-
- if (curr_delay < 50)
- curr_delay += 5;
- }
-
- return -ETIMEDOUT;
-}
-
-static int ath10k_pci_wake(struct ath10k *ar)
-{
- ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
- PCIE_SOC_WAKE_V_MASK);
- return ath10k_pci_wake_wait(ar);
-}
-
-static void ath10k_pci_sleep(struct ath10k *ar)
-{
- ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
- PCIE_SOC_WAKE_RESET);
-}
-
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
@@ -1212,11 +1372,15 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
static int ath10k_pci_hif_start(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
+ pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ ar_pci->link_ctl);
+
return 0;
}
@@ -1329,6 +1493,9 @@ static void ath10k_pci_flush(struct ath10k *ar)
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
/* Most likely the device has HTT Rx ring configured. The only way to
@@ -1347,6 +1514,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+ WARN_ON(ar_pci->ps_wake_refcount > 0);
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1524,12 +1695,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
case QCA6174_HW_1_0_CHIP_ID_REV:
case QCA6174_HW_1_1_CHIP_ID_REV:
+ case QCA6174_HW_2_1_CHIP_ID_REV:
+ case QCA6174_HW_2_2_CHIP_ID_REV:
return 3;
case QCA6174_HW_1_3_CHIP_ID_REV:
return 2;
- case QCA6174_HW_2_1_CHIP_ID_REV:
- case QCA6174_HW_2_2_CHIP_ID_REV:
- return 6;
case QCA6174_HW_3_0_CHIP_ID_REV:
case QCA6174_HW_3_1_CHIP_ID_REV:
case QCA6174_HW_3_2_CHIP_ID_REV:
@@ -1967,15 +2137,15 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
- ret = ath10k_pci_wake(ar);
- if (ret) {
- ath10k_err(ar, "failed to wake up target: %d\n", ret);
- return ret;
- }
+ pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ &ar_pci->link_ctl);
+ pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
/*
* Bring the target up cleanly.
@@ -2023,7 +2193,6 @@ err_ce:
ath10k_pci_ce_deinit(ar);
err_sleep:
- ath10k_pci_sleep(ar);
return ret;
}
@@ -2034,28 +2203,18 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
/* Currently hif_power_up performs effectively a reset and hif_stop
* resets the chip as well so there's no point in resetting here.
*/
-
- ath10k_pci_sleep(ar);
}
#ifdef CONFIG_PM
-#define ATH10K_PCI_PM_CONTROL 0x44
-
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct pci_dev *pdev = ar_pci->pdev;
- u32 val;
-
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0x3) {
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- (val & 0xffffff00) | 0x03);
- }
+ /* The grace timer may still be counting down and ar->ps_awake may
+ * still be true. It is known that the device can be asleep after
+ * resuming regardless of the SoC powersave state before suspending,
+ * so make sure the device is asleep before proceeding.
+ */
+ ath10k_pci_sleep_sync(ar);
return 0;
}
@@ -2066,22 +2225,14 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
struct pci_dev *pdev = ar_pci->pdev;
u32 val;
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0) {
- pci_restore_state(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- val & 0xffffff00);
- /*
- * Suspend/Resume resets the PCI configuration space,
- * so we have to re-disable the RETRY_TIMEOUT register (0x41)
- * to keep PCI Tx retries from interfering with C3 CPU state
- */
- pci_read_config_dword(pdev, 0x40, &val);
-
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- }
+ /* Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+ * from interfering with C3 CPU state. pci_restore_state() won't help
+ * here since it only restores the first 64 bytes of the PCI config
+ * header.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
return 0;
}
@@ -2497,7 +2648,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev;
- u32 lcr_val;
int ret;
pci_set_drvdata(pdev, ar);
@@ -2531,10 +2681,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
pci_set_master(pdev);
- /* Workaround: Disable ASPM */
- pci_read_config_dword(pdev, 0x80, &lcr_val);
- pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
-
/* Arrange for access to Target SoC registers. */
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
if (!ar_pci->mem) {
@@ -2621,9 +2767,19 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->dev = &pdev->dev;
ar_pci->ar = ar;
+ if (pdev->subsystem_vendor || pdev->subsystem_device)
+ scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
+ "%04x:%04x:%04x:%04x",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
spin_lock_init(&ar_pci->ce_lock);
+ spin_lock_init(&ar_pci->ps_lock);
+
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
+ setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
+ (unsigned long)ar);
ret = ath10k_pci_claim(ar);
if (ret) {
@@ -2631,12 +2787,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_core_destroy;
}
- ret = ath10k_pci_wake(ar);
- if (ret) {
- ath10k_err(ar, "failed to wake up: %d\n", ret);
- goto err_release;
- }
-
ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
@@ -2678,11 +2828,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id);
- goto err_sleep;
+ goto err_free_irq;
}
- ath10k_pci_sleep(ar);
-
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2702,9 +2850,6 @@ err_free_pipes:
ath10k_pci_free_pipes(ar);
err_sleep:
- ath10k_pci_sleep(ar);
-
-err_release:
ath10k_pci_release(ar);
err_core_destroy:
@@ -2734,6 +2879,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
+ ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
}
@@ -2770,7 +2916,19 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
+
+/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+
+/* QCA6174 2.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+
+/* QCA6174 3.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index bddf54320160..d7696ddc03c4 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -185,6 +185,41 @@ struct ath10k_pci {
/* Map CE id to ce_state */
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
struct timer_list rx_post_retry;
+
+ /* Due to HW quirks it is recommended to disable ASPM during device
+ * bootup. To do that, the original PCI-E Link Control register value
+ * is stored before bootup and re-programmed afterwards.
+ */
+ u16 link_ctl;
+
+ /* Protects ps_awake and ps_wake_refcount */
+ spinlock_t ps_lock;
+
+ /* The device has a special powersave-oriented register. When the
+ * device is considered asleep it draws less power and the driver is
+ * forbidden from accessing most MMIO registers. If the host accesses
+ * them without waking the device up first, the device might scribble
+ * over host memory or return 0xdeadbeef readouts.
+ */
+ unsigned long ps_wake_refcount;
+
+ /* Waking up takes some time (up to 2ms in some cases) so it can be
+ * bad for latency. To mitigate this the device isn't immediately
+ * allowed to sleep after all references are dropped - instead there's
+ * a grace period after which the powersave register is updated,
+ * unless some activity to/from the device happened in the meantime.
+ *
+ * Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
+ */
+ struct timer_list ps_timer;
+
+ /* MMIO registers are used to communicate with the device. Under
+ * intensive traffic, touching the powersave register for every access
+ * would be wasteful overhead and would needlessly stall the CPU. It
+ * is far more efficient to rely on a variable in RAM and update it
+ * only upon powersave register state changes.
+ */
+ bool ps_awake;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -209,61 +244,25 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
* for this device; but that's not guaranteed.
*/
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
- (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
+ (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \
CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
0x100000 | ((addr) & 0xfffff))
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
-/* Target exposes its registers for direct access. However before host can
- * access them it needs to make sure the target is awake (ath10k_pci_wake,
- * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
- * to sleep unless host tells it to (ath10k_pci_sleep).
- *
- * If host tries to access target registers without waking it up it can
- * scribble over host memory.
- *
- * If target is asleep waking it up may take up to even 2ms.
- */
-
-static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
- u32 value)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- iowrite32(value, ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- return ioread32(ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
-{
- return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
-}
-
-static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
-{
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
-}
-
-static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
-static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
- iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
+/* QCA6174 is known to have Tx/Rx issues when the SOC_WAKE register is poked
+ * too frequently. To avoid this, put the SoC to sleep after a very
+ * conservative grace period. Adjust with great care.
+ */
+#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index e9cc7787bf5f..492b5a5af434 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -661,6 +661,28 @@ struct rx_msdu_end {
#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
#define RX_PPDU_START_INFO5_SERVICE_LSB 0
+/* No idea what this flag means. It seems to be always set in rate. */
+#define RX_PPDU_START_RATE_FLAG BIT(3)
+
+enum rx_ppdu_start_rate {
+ RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
+ RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
+ RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
+ RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
+ RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
+ RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
+ RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
+ RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
+
+ RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
+ RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
+ RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
+ RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
+ RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
+ RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
+ RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
+};
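
Each enum value above is just an ATH10K_HW_RATE_* code with BIT(3) OR'd in, so masking the flag off recovers the plain hardware rate. A sketch under that assumption:

    #include <stdint.h>

    #define RX_RATE_FLAG (1u << 3)  /* always observed set in rate */

    /* Strip the flag to get back the ATH10K_HW_RATE_* code. */
    static uint8_t rx_ppdu_rate_to_hw_rate(uint8_t ppdu_rate)
    {
            return ppdu_rate & (uint8_t)~RX_RATE_FLAG;
    }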
+
struct rx_ppdu_start {
struct {
u8 pri20_mhz;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index d22addf6118b..8dcd424aa502 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -519,9 +519,12 @@ int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
int ath10k_spectral_create(struct ath10k *ar)
{
+ /* The buffer size covers whole-channel scans on both bands with up to
+ * 128 bins. Scans with more than 128 bins must be run on a single
+ * band at a time.
+ */
ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
ar->debug.debugfs_phy,
- 1024, 256,
+ 1140, 2500,
&rfs_spec_scan_cb, NULL);
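
relay_open() takes a sub-buffer size and count, so this change grows each relay buffer from 1024 x 256 = 262144 bytes to 1140 x 2500 = 2850000 bytes, with 1140 bytes sized to hold one up-to-128-bin sample event. A quick check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* relay buffer total = subbuf_size * n_subbufs */
            printf("old: %d bytes\n", 1024 * 256);    /* 262144 */
            printf("new: %d bytes\n", 1140 * 2500);   /* 2850000 */
            return 0;
    }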
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index aede750809fe..1a899d70dc5d 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -23,102 +23,50 @@
#include "debug.h"
#include "wmi-ops.h"
-static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
- enum wmi_vdev_type type)
+static int
+ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
{
- struct ath10k_vif *arvif;
- int count = 0;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- list_for_each_entry(arvif, &ar->arvifs, list) {
- if (!arvif->is_started)
- continue;
-
- if (!arvif->is_up)
- continue;
-
- if (arvif->vdev_type != type)
- continue;
-
- count++;
- }
- return count;
-}
-
-static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
- unsigned long *state)
-{
- *state = ATH10K_QUIET_DUTY_CYCLE_MAX;
+ *state = ATH10K_THERMAL_THROTTLE_MAX;
return 0;
}
-static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
- unsigned long *state)
+static int
+ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
{
struct ath10k *ar = cdev->devdata;
mutex_lock(&ar->conf_mutex);
- *state = ar->thermal.duty_cycle;
+ *state = ar->thermal.throttle_state;
mutex_unlock(&ar->conf_mutex);
return 0;
}
-static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
- unsigned long duty_cycle)
+static int
+ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long throttle_state)
{
struct ath10k *ar = cdev->devdata;
- u32 period, duration, enabled;
- int num_bss, ret = 0;
- mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH10K_STATE_ON) {
- ret = -ENETDOWN;
- goto out;
- }
-
- if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
- ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
- duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
- ret = -EINVAL;
- goto out;
- }
- /* TODO: Right now, thermal mitigation is handled only for single/multi
- * vif AP mode. Since quiet param is not validated in STA mode, it needs
- * to be investigated further to handle multi STA and multi-vif (AP+STA)
- * mode properly.
- */
- num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
- if (!num_bss) {
- ath10k_warn(ar, "no active AP interfaces\n");
- ret = -ENETDOWN;
- goto out;
- }
- period = max(ATH10K_QUIET_PERIOD_MIN,
- (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
- duration = (period * duty_cycle) / 100;
- enabled = duration ? 1 : 0;
-
- ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
- ATH10K_QUIET_START_OFFSET,
- enabled);
- if (ret) {
- ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
- period, duration, enabled, ret);
- goto out;
+ if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
+ ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n",
+ throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
+ return -EINVAL;
}
- ar->thermal.duty_cycle = duty_cycle;
-out:
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.throttle_state = throttle_state;
+ ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static struct thermal_cooling_device_ops ath10k_thermal_ops = {
- .get_max_state = ath10k_thermal_get_max_dutycycle,
- .get_cur_state = ath10k_thermal_get_cur_dutycycle,
- .set_cur_state = ath10k_thermal_set_cur_dutycycle,
+ .get_max_state = ath10k_thermal_get_max_throttle_state,
+ .get_cur_state = ath10k_thermal_get_cur_throttle_state,
+ .set_cur_state = ath10k_thermal_set_cur_throttle_state,
};
static ssize_t ath10k_thermal_show_temp(struct device *dev,
@@ -127,6 +75,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
{
struct ath10k *ar = dev_get_drvdata(dev);
int ret, temperature;
+ unsigned long time_left;
mutex_lock(&ar->conf_mutex);
@@ -148,9 +97,9 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
goto out;
}
- ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
- ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
- if (ret == 0) {
+ time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (!time_left) {
ath10k_warn(ar, "failed to synchronize thermal read\n");
ret = -ETIMEDOUT;
goto out;
@@ -184,6 +133,32 @@ static struct attribute *ath10k_hwmon_attrs[] = {
};
ATTRIBUTE_GROUPS(ath10k_hwmon);
+void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+ u32 period, duration, enabled;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return;
+
+ period = ar->thermal.quiet_period;
+ duration = (period * ar->thermal.throttle_state) / 100;
+ enabled = duration ? 1 : 0;
+
+ ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+ ATH10K_QUIET_START_OFFSET,
+ enabled);
+ if (ret) {
+ ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
+ period, duration, enabled, ret);
+ }
+}
+
int ath10k_thermal_register(struct ath10k *ar)
{
struct thermal_cooling_device *cdev;
@@ -202,11 +177,12 @@ int ath10k_thermal_register(struct ath10k *ar)
ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
"cooling_device");
if (ret) {
- ath10k_err(ar, "failed to create thermal symlink\n");
+ ath10k_err(ar, "failed to create cooling device symlink\n");
goto err_cooling_destroy;
}
ar->thermal.cdev = cdev;
+ ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
/* Do not register hwmon device when temperature reading is not
* supported by firmware
@@ -231,7 +207,7 @@ int ath10k_thermal_register(struct ath10k *ar)
return 0;
err_remove_link:
- sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
err_cooling_destroy:
thermal_cooling_device_unregister(cdev);
return ret;
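
The new throttling path reduces the old per-BSS duty-cycle logic to straight percentage math over a fixed quiet period. A stand-alone sketch mirroring the arithmetic in ath10k_thermal_set_throttling() above (user-space, illustrative input values):

#include <stdio.h>

/* Same arithmetic as ath10k_thermal_set_throttling(): the throttle
 * state (0..100) is the percentage of the quiet period spent silent.
 */
static void quiet_params(unsigned int period, unsigned int state,
                         unsigned int *duration, unsigned int *enabled)
{
        *duration = (period * state) / 100;
        *enabled = *duration ? 1 : 0;
}

int main(void)
{
        unsigned int duration, enabled;

        quiet_params(100, 40, &duration, &enabled); /* default 100 ms period */
        printf("duration %u enabled %u\n", duration, enabled); /* 40 1 */
        return 0;
}
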
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index bccc17ae0fde..b610ea5caae8 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -19,16 +19,17 @@
#define ATH10K_QUIET_PERIOD_DEFAULT 100
#define ATH10K_QUIET_PERIOD_MIN 25
#define ATH10K_QUIET_START_OFFSET 10
-#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
#define ATH10K_HWMON_NAME_LEN 15
#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_THROTTLE_MAX 100
struct ath10k_thermal {
struct thermal_cooling_device *cdev;
struct completion wmi_sync;
/* protected by conf_mutex */
- u32 duty_cycle;
+ u32 throttle_state;
+ u32 quiet_period;
/* temperature value in Celcius degree
* protected by data_lock
*/
@@ -39,6 +40,7 @@ struct ath10k_thermal {
int ath10k_thermal_register(struct ath10k *ar);
void ath10k_thermal_unregister(struct ath10k *ar);
void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+void ath10k_thermal_set_throttling(struct ath10k *ar);
#else
static inline int ath10k_thermal_register(struct ath10k *ar)
{
@@ -54,5 +56,9 @@ static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
{
}
+static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+}
+
#endif
#endif /* _THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 5407887380ab..71bdb368813d 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -21,11 +21,16 @@
#include "core.h"
#if !defined(_TRACE_H_)
-static inline u32 ath10k_frm_hdr_len(const void *buf)
+static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
{
const struct ieee80211_hdr *hdr = buf;
- return ieee80211_hdrlen(hdr->frame_control);
+ /* In some rare cases (e.g. fcs error) the device reports a frame buffer
+ * shorter than what the frame header implies (e.g. len = 0). The buffer
+ * can still be accessed, so do a simple min() to guarantee the caller
+ * never gets a value greater than len.
+ */
+ return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
}
#endif
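
The clamp added to ath10k_frm_hdr_len() is just min(buffer length, 802.11 header length). A user-space sketch with ieee80211_hdrlen() stubbed out as a plain number:

#include <stdio.h>
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b)
{
        return a < b ? a : b;
}

/* hdrlen stands in for ieee80211_hdrlen(hdr->frame_control). */
static uint32_t frm_hdr_len(uint32_t buf_len, uint32_t hdrlen)
{
        /* A truncated buffer (e.g. an FCS-error frame with len = 0) must
         * never yield a header length larger than the buffer itself.
         */
        return min_u32(buf_len, hdrlen);
}

int main(void)
{
        printf("%u\n", frm_hdr_len(0, 24));  /* truncated -> 0  */
        printf("%u\n", frm_hdr_len(64, 24)); /* normal    -> 24 */
        return 0;
}
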
@@ -46,7 +51,7 @@ static inline void trace_ ## name(proto) {}
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath10k
-#define ATH10K_MSG_MAX 200
+#define ATH10K_MSG_MAX 400
DECLARE_EVENT_CLASS(ath10k_log_event,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
@@ -360,13 +365,13 @@ DECLARE_EVENT_CLASS(ath10k_hdr_event,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
- __dynamic_array(u8, data, ath10k_frm_hdr_len(data))
+ __dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- __entry->len = ath10k_frm_hdr_len(data);
+ __entry->len = ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(data), data, __entry->len);
),
@@ -387,15 +392,16 @@ DECLARE_EVENT_CLASS(ath10k_payload_event,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
- __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
+ __dynamic_array(u8, payload, (len -
+ ath10k_frm_hdr_len(data, len)))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- __entry->len = len - ath10k_frm_hdr_len(data);
+ __entry->len = len - ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(payload),
- data + ath10k_frm_hdr_len(data), __entry->len);
+ data + ath10k_frm_hdr_len(data, len), __entry->len);
),
TP_printk(
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 3f00cec8aef5..826500bb2b1b 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -55,8 +55,10 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
lockdep_assert_held(&htt->tx_lock);
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
- tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
+ tx_done->msdu_id, !!tx_done->discard,
+ !!tx_done->no_ack, !!tx_done->success);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
@@ -97,6 +99,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
+ if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
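
The txrx hunk above derives the final mac80211 status flags from the HTT completion. A user-space sketch of the same decision, with stand-in flag bits instead of the real IEEE80211_TX_* values:

#include <stdio.h>

#define TX_STAT_ACK               (1U << 0) /* stand-ins for the        */
#define TX_CTL_NO_ACK             (1U << 1) /* IEEE80211_TX_* flag bits */
#define TX_STAT_NOACK_TRANSMITTED (1U << 2)

static unsigned int tx_status_flags(unsigned int flags, int no_ack,
                                    int success)
{
        if (no_ack)
                flags &= ~TX_STAT_ACK;

        /* A frame sent with the no-ack policy counts as delivered once
         * the hardware reports success, hence the dedicated bit.
         */
        if (success && (flags & TX_CTL_NO_ACK))
                flags |= TX_STAT_NOACK_TRANSMITTED;

        return flags;
}

int main(void)
{
        printf("0x%x\n", tx_status_flags(TX_CTL_NO_ACK, 1, 1)); /* 0x6 */
        return 0;
}
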
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index c8b64e7a6089..47fe2e756bec 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -45,6 +45,10 @@ struct wmi_ops {
struct wmi_rdy_ev_arg *arg);
int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats);
+ int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg);
+ int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -81,7 +85,8 @@ struct wmi_ops {
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN]);
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type);
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]);
struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
@@ -148,6 +153,27 @@ struct wmi_ops {
u32 num_ac);
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg);
+ struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+ struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id,
+ const u8 *pattern,
+ const u8 *mask,
+ int pattern_len,
+ int pattern_offset);
+ struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id);
+ struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_tdls_state state);
+ struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan);
+ struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -274,6 +300,26 @@ ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
+ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_roam_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_wow_event)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_wow_event(ar, skb, arg);
+}
+
+static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
@@ -624,14 +670,15 @@ ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_create)
return -EOPNOTSUPP;
- skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
+ skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1060,4 +1107,145 @@ ath10k_wmi_sta_keepalive(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
+static inline int
+ath10k_wmi_wow_enable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_enable(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_wakeup_event)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
+ pattern, mask, pattern_len,
+ pattern_offset);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_del_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_update_fw_tdls_state)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
+}
+
+static inline int
+ath10k_wmi_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_tdls_peer_update)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->tdls_peer_update_cmdid);
+}
+
+static inline int
+ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_adaptive_qcs)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
+}
+
#endif
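
All of the wrappers added above follow the same optional-op dispatch pattern: bail out with -EOPNOTSUPP when the firmware flavour lacks the op, otherwise build a command buffer and send it. A compact user-space sketch of that shape (stand-in types, not the real ath10k structures):

#include <stdio.h>
#include <errno.h>

/* Stand-ins for struct ath10k / struct sk_buff. */
struct buf { int dummy; };

struct ops_sketch {
        struct buf *(*gen_cmd)(void);
};

static int send_cmd(struct buf *b) { (void)b; return 0; }

static int wmi_call(const struct ops_sketch *ops)
{
        struct buf *b;

        if (!ops->gen_cmd)
                return -EOPNOTSUPP; /* op not wired up for this flavour */

        b = ops->gen_cmd();
        if (!b)
                return -ENOMEM;

        return send_cmd(b);
}

static struct buf dummy;
static struct buf *gen_dummy(void) { return &dummy; }

int main(void)
{
        struct ops_sketch tlv = { .gen_cmd = gen_dummy };
        struct ops_sketch legacy = { .gen_cmd = NULL };

        printf("%d %d\n", wmi_call(&tlv), wmi_call(&legacy)); /* 0 -95 on Linux */
        return 0;
}
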
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index ee0c5f602e29..563fde73623c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -16,10 +16,13 @@
*/
#include "core.h"
#include "debug.h"
+#include "mac.h"
#include "hw.h"
+#include "mac.h"
#include "wmi.h"
#include "wmi-ops.h"
#include "wmi-tlv.h"
+#include "p2p.h"
/***************/
/* TLV helpers */
@@ -31,9 +34,9 @@ struct wmi_tlv_policy {
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
[WMI_TLV_TAG_ARRAY_BYTE]
- = { .min_len = sizeof(u8) },
+ = { .min_len = 0 },
[WMI_TLV_TAG_ARRAY_UINT32]
- = { .min_len = sizeof(u32) },
+ = { .min_len = 0 },
[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
= { .min_len = sizeof(struct wmi_scan_event) },
[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
@@ -62,6 +65,14 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+ [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+ [WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+ [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+ = { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+ [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
};
static int
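
The policy table above gates TLV parsing by a per-tag minimum length; relaxing the byte/u32 array entries to min_len = 0 admits legitimately empty arrays from firmware. A sketch of the check, assuming the parser compares each TLV's length against its policy entry:

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct tlv_policy { size_t min_len; };

static int tlv_check(const struct tlv_policy *pol, size_t tlv_len)
{
        if (tlv_len < pol->min_len)
                return -EINVAL; /* TLV shorter than the tag allows */
        return 0;
}

int main(void)
{
        struct tlv_policy byte_array = { .min_len = 0 };

        printf("%d\n", tlv_check(&byte_array, 0)); /* empty array now OK */
        return 0;
}
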
@@ -168,6 +179,7 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
{
const void **tb;
const struct wmi_tlv_bcn_tx_status_ev *ev;
+ struct ath10k_vif *arvif;
u32 vdev_id, tx_status;
int ret;
@@ -201,6 +213,10 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
break;
}
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif && arvif->is_up && arvif->vif->csa_active)
+ ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
kfree(tb);
return 0;
}
@@ -296,6 +312,83 @@ static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
return 0;
}
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_p2p_noa_ev *ev;
+ const struct wmi_p2p_noa_info *noa;
+ int ret, vdev_id;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+ noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+ vdev_id, noa->num_descriptors);
+
+ ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_tx_pause_ev *ev;
+ int ret, vdev_id;
+ u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ pause_id = __le32_to_cpu(ev->pause_id);
+ action = __le32_to_cpu(ev->action);
+ vdev_map = __le32_to_cpu(ev->vdev_map);
+ peer_id = __le32_to_cpu(ev->peer_id);
+ tid_map = __le32_to_cpu(ev->tid_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+ pause_id, action, vdev_map, peer_id, tid_map);
+
+ for (vdev_id = 0; vdev_map; vdev_id++) {
+ if (!(vdev_map & BIT(vdev_id)))
+ continue;
+
+ vdev_map &= ~BIT(vdev_id);
+ ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+ }
+
+ kfree(tb);
+ return 0;
+}
+
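
The vdev_map walk in the tx-pause handler above is a standard destructive bitmap iteration. The same loop in stand-alone form:

#include <stdio.h>

int main(void)
{
        unsigned int vdev_map = 0x15; /* example: vdevs 0, 2 and 4 */
        int vdev_id;

        /* Clear each bit as it is consumed so the loop terminates as
         * soon as the map is empty.
         */
        for (vdev_id = 0; vdev_map; vdev_id++) {
                if (!(vdev_map & (1U << vdev_id)))
                        continue;

                vdev_map &= ~(1U << vdev_id);
                printf("pause vdev %d\n", vdev_id);
        }
        return 0;
}
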
/***********/
/* TLV ops */
/***********/
@@ -417,6 +510,12 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_DIAG_EVENTID:
ath10k_wmi_tlv_event_diag(ar, skb);
break;
+ case WMI_TLV_P2P_NOA_EVENTID:
+ ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+ break;
+ case WMI_TLV_TX_PAUSE_EVENTID:
+ ath10k_wmi_tlv_event_tx_pause(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -1012,6 +1111,65 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
return 0;
}
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_roam_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+ arg->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_wow_event_info *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+ arg->flag = __le32_to_cpu(ev->flag);
+ arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+ arg->data_len = __le32_to_cpu(ev->data_len);
+
+ kfree(tb);
+ return 0;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
@@ -1160,8 +1318,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
- cfg->num_offload_peers = __cpu_to_le32(3);
- cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+ cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
} else {
cfg->num_offload_peers = __cpu_to_le32(0);
cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
@@ -1178,8 +1336,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
cfg->rx_decap_mode = __cpu_to_le32(1);
cfg->scan_max_pending_reqs = __cpu_to_le32(4);
- cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
- cfg->roam_offload_max_vdev = __cpu_to_le32(3);
+ cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
cfg->num_mcast_groups = __cpu_to_le32(0);
cfg->num_mcast_table_elems = __cpu_to_le32(0);
@@ -1193,11 +1351,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
cfg->max_frag_entries = __cpu_to_le32(2);
- cfg->num_tdls_vdevs = __cpu_to_le32(1);
+ cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
cfg->num_multicast_filter_entries = __cpu_to_le32(5);
- cfg->num_wow_filters = __cpu_to_le32(0x16);
+ cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
cfg->num_keep_alive_pattern = __cpu_to_le32(6);
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
@@ -1248,7 +1406,7 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
cmd = (void *)tlv->value;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- cmd->burst_duration_ms = __cpu_to_le32(0);
+ cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
cmd->num_channels = __cpu_to_le32(arg->n_channels);
cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
@@ -1408,8 +1566,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
void *ptr;
u32 flags = 0;
- if (WARN_ON(arg->ssid && arg->ssid_len == 0))
- return ERR_PTR(-EINVAL);
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -1782,7 +1938,8 @@ ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
{
struct wmi_tlv_peer_create_cmd *cmd;
struct wmi_tlv *tlv;
@@ -1797,7 +1954,7 @@ ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
+ cmd->peer_type = __cpu_to_le32(peer_type);
ether_addr_copy(cmd->peer_addr.addr, peer_addr);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
@@ -2027,7 +2184,7 @@ ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
if (!mac)
return ERR_PTR(-EINVAL);
- skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -2485,6 +2642,387 @@ ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
return skb;
}
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct wmi_tdls_set_state_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+ /* Options come from wmi_tlv_tdls_options; none of them are
+ * enabled for now.
+ */
+ u32 options = 0;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->state = __cpu_to_le32(state);
+ cmd->notification_interval_ms = __cpu_to_le32(5000);
+ cmd->tx_discovery_threshold = __cpu_to_le32(100);
+ cmd->tx_teardown_threshold = __cpu_to_le32(5);
+ cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+ cmd->rssi_delta = __cpu_to_le32(-20);
+ cmd->tdls_options = __cpu_to_le32(options);
+ cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+ cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+ cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+ cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+ cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+ state, vdev_id);
+ return skb;
+}
+
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+ u32 peer_qos = 0;
+
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+ peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+ return peer_qos;
+}
+
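
ath10k_wmi_tlv_prepare_peer_qos() above packs the four AC bits into the low nibble and the max service period into bits 5-6 via SM(). A user-space sketch using the WMI_TLV_TDLS_PEER_SP_MASK/LSB values added to wmi-tlv.h later in this patch:

#include <stdio.h>

#define TDLS_PEER_SP_MASK 0x60
#define TDLS_PEER_SP_LSB  5

/* AC bits go in the low nibble; the max service period is shifted into
 * bits 5-6, which is all the SM() macro does here.
 */
static unsigned int prepare_peer_qos(unsigned int ac_bits, unsigned int sp)
{
        return ac_bits | ((sp << TDLS_PEER_SP_LSB) & TDLS_PEER_SP_MASK);
}

int main(void)
{
        printf("0x%x\n", prepare_peer_qos(0xf, 2)); /* all ACs, SP 2 -> 0x4f */
        return 0;
}
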
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan_arg)
+{
+ struct wmi_tdls_peer_update_cmd *cmd;
+ struct wmi_tdls_peer_capab *peer_cap;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 peer_qos;
+ void *ptr;
+ int len;
+ int i;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*peer_cap) +
+ sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+ cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+ tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+ peer_cap = (void *)tlv->value;
+ peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+ cap->peer_max_sp);
+ peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+ peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+ peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+ peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+ peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+ peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+ peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+ for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+ peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+ peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+ peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+ peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*peer_cap);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
+
+ ptr += sizeof(*tlv);
+
+ for (i = 0; i < cap->peer_chan_len; i++) {
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*chan));
+ chan = (void *)tlv->value;
+ ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*chan);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+ arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_enable_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->enable = __cpu_to_le32(1);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_tlv_wow_add_del_event_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->is_add = __cpu_to_le32(enable);
+ cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_host_wakeup_ind *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id, const u8 *pattern,
+ const u8 *bitmask, int pattern_len,
+ int pattern_offset)
+{
+ struct wmi_tlv_wow_add_pattern_cmd *cmd;
+ struct wmi_tlv_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*tlv) + sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ /* cmd */
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+ tlv->len = __cpu_to_le16(sizeof(*bitmap));
+ bitmap = (void *)tlv->value;
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+ bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = __cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+ bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(sizeof(u32));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+ return skb;
+}
+
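
The length computation in gen_wow_add_pattern() above is worth spelling out: every sub-element costs a TLV header even when its payload is empty, so the ipv4/ipv6/magic/timeout slots still contribute sizeof(*tlv) each. A sketch of the budget with illustrative stand-in sizes (the real sizeof values differ):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        /* Illustrative stand-in sizes; the real sizeof values differ. */
        size_t tlv_hdr = 4, cmd = 12, bitmap = 312, ratelimit = 4;
        size_t len;

        len = tlv_hdr + cmd +      /* command              */
              tlv_hdr +            /* array-struct wrapper */
              tlv_hdr + bitmap +   /* bitmap pattern       */
              tlv_hdr +            /* empty ipv4 sync      */
              tlv_hdr +            /* empty ipv6 sync      */
              tlv_hdr +            /* empty magic          */
              tlv_hdr +            /* empty info timeout   */
              tlv_hdr + ratelimit; /* ratelimit interval   */

        printf("%zu bytes\n", len); /* 360 */
        return 0;
}
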
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id)
+{
+ struct wmi_tlv_wow_del_pattern_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct wmi_tlv_adaptive_qcs *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+ return skb;
+}
+
/****************/
/* TLV mappings */
/****************/
@@ -2609,6 +3147,9 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+ .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+ .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+ .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
};
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -2736,6 +3277,8 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+ .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
@@ -2781,6 +3324,14 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+ .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+ .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+ .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+ .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+ .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+ .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+ .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+ .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
};
/************/
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index a6c8280cc4b1..ad655c44afdb 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1454,6 +1454,174 @@ struct wmi_tlv_stats_ev {
__le32 num_chan_stats;
} __packed;
+struct wmi_tlv_p2p_noa_ev {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+} __packed;
+
+struct wmi_tlv_wow_add_del_event_cmd {
+ __le32 vdev_id;
+ __le32 is_add;
+ __le32 event_bitmap;
+} __packed;
+
+struct wmi_tlv_wow_enable_cmd {
+ __le32 enable;
+} __packed;
+
+struct wmi_tlv_wow_host_wakeup_ind {
+ __le32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+ __le32 vdev_id;
+ __le32 flag;
+ __le32 wake_reason;
+ __le32 data_len;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+struct wmi_tlv_wow_bitmap_pattern {
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ __le32 pattern_offset;
+ __le32 pattern_len;
+ __le32 bitmask_len;
+ __le32 pattern_id;
+} __packed;
+
+struct wmi_tlv_wow_add_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+struct wmi_tlv_wow_del_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+/* TDLS Options */
+enum wmi_tlv_tdls_options {
+ WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
+ WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
+ WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+struct wmi_tdls_set_state_cmd {
+ __le32 vdev_id;
+ __le32 state;
+ __le32 notification_interval_ms;
+ __le32 tx_discovery_threshold;
+ __le32 tx_teardown_threshold;
+ __le32 rssi_teardown_threshold;
+ __le32 rssi_delta;
+ __le32 tdls_options;
+ __le32 tdls_peer_traffic_ind_window;
+ __le32 tdls_peer_traffic_response_timeout_ms;
+ __le32 tdls_puapsd_mask;
+ __le32 tdls_puapsd_inactivity_time_ms;
+ __le32 tdls_puapsd_rx_frame_threshold;
+} __packed;
+
+struct wmi_tdls_peer_update_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_state;
+} __packed;
+
+enum {
+ WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
+ WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
+ WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
+ WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+#define WMI_TLV_TDLS_PEER_SP_MASK 0x60
+#define WMI_TLV_TDLS_PEER_SP_LSB 5
+
+struct wmi_tdls_peer_capab {
+ __le32 peer_qos;
+ __le32 buff_sta_support;
+ __le32 off_chan_support;
+ __le32 peer_curr_operclass;
+ __le32 self_curr_operclass;
+ __le32 peer_chan_len;
+ __le32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ __le32 is_peer_responder;
+ __le32 pref_offchan_num;
+ __le32 pref_offchan_bw;
+} __packed;
+
+struct wmi_tlv_adaptive_qcs {
+ __le32 enable;
+} __packed;
+
+/**
+ * wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ *
+ * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
+ * Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
+ * Only peer_id is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
+ */
+enum wmi_tlv_tx_pause_id {
+ WMI_TLV_TX_PAUSE_ID_MCC = 1,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
+ WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
+ WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
+ WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
+ WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
+ WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
+ WMI_TLV_TX_PAUSE_ID_HOST = 21,
+};
+
+enum wmi_tlv_tx_pause_action {
+ WMI_TLV_TX_PAUSE_ACTION_STOP,
+ WMI_TLV_TX_PAUSE_ACTION_WAKE,
+};
+
+struct wmi_tlv_tx_pause_ev {
+ __le32 pause_id;
+ __le32 action;
+ __le32 vdev_map;
+ __le32 peer_id;
+ __le32 tid_map;
+} __packed;
+
void ath10k_wmi_tlv_attach(struct ath10k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index c7ea77edce24..0fabe689179c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -26,6 +26,7 @@
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
+#include "p2p.h"
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
@@ -884,20 +885,24 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
- int ret;
+ unsigned long time_left;
- ret = wait_for_completion_timeout(&ar->wmi.service_ready,
- WMI_SERVICE_READY_TIMEOUT_HZ);
- return ret;
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
- int ret;
+ unsigned long time_left;
- ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
- WMI_UNIFIED_READY_TIMEOUT_HZ);
- return ret;
+ time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
+ WMI_UNIFIED_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
}
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
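
wait_for_completion_timeout() returns the unsigned number of jiffies remaining (0 on timeout), never a negative errno; the old code above returned that raw count, leaving interpretation to callers. A sketch of the normalized pattern, with the kernel call stubbed out:

#include <stdio.h>
#include <errno.h>

/* Stub standing in for wait_for_completion_timeout(); in the kernel it
 * returns the unsigned jiffies remaining, or 0 on timeout.
 */
static unsigned long fake_wait(unsigned long timeout)
{
        return timeout; /* pretend the completion fired immediately */
}

static int wait_for_service_ready(void)
{
        unsigned long time_left = fake_wait(5 * 100);

        if (!time_left)
                return -ETIMEDOUT; /* normalize timeout to an errno */
        return 0;
}

int main(void)
{
        printf("%d\n", wait_for_service_ready()); /* 0 */
        return 0;
}
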
@@ -1351,63 +1356,6 @@ static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
return band;
}
-static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
-{
- u8 rate_idx = 0;
-
- /* rate in Kbps */
- switch (rate) {
- case 1000:
- rate_idx = 0;
- break;
- case 2000:
- rate_idx = 1;
- break;
- case 5500:
- rate_idx = 2;
- break;
- case 11000:
- rate_idx = 3;
- break;
- case 6000:
- rate_idx = 4;
- break;
- case 9000:
- rate_idx = 5;
- break;
- case 12000:
- rate_idx = 6;
- break;
- case 18000:
- rate_idx = 7;
- break;
- case 24000:
- rate_idx = 8;
- break;
- case 36000:
- rate_idx = 9;
- break;
- case 48000:
- rate_idx = 10;
- break;
- case 54000:
- rate_idx = 11;
- break;
- default:
- break;
- }
-
- if (band == IEEE80211_BAND_5GHZ) {
- if (rate_idx > 3)
- /* Omit CCK rates */
- rate_idx -= 4;
- else
- rate_idx = 0;
- }
-
- return rate_idx;
-}
-
/* If keys are configured, HW decrypts all frames
* with protected bit set. Mark such frames as decrypted.
*/
@@ -1489,6 +1437,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
struct wmi_mgmt_rx_ev_arg arg = {};
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
+ struct ieee80211_supported_band *sband;
u32 rx_status;
u32 channel;
u32 phy_mode;
@@ -1559,9 +1508,11 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+ sband = &ar->mac.sbands[status->band];
+
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
- status->rate_idx = get_rate_idx(rate, status->band);
+ status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
@@ -1585,6 +1536,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
}
}
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath10k_mac_handle_beacon(ar, skb);
+
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
@@ -1691,10 +1645,10 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
survey = &ar->survey[idx];
survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
- survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
+ survey->time_busy = WMI_CHAN_INFO_MSEC(rx_clear_count);
survey->noise = noise_floor;
survey->filled = SURVEY_INFO_TIME |
- SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_NOISE_DBM;
}
@@ -2276,109 +2230,25 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
tim->bitmap_ctrl, pvm_len);
}
-static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
- const struct wmi_p2p_noa_info *noa)
-{
- struct ieee80211_p2p_noa_attr *noa_attr;
- u8 ctwindow_oppps = noa->ctwindow_oppps;
- u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
- bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
- __le16 *noa_attr_len;
- u16 attr_len;
- u8 noa_descriptors = noa->num_descriptors;
- int i;
-
- /* P2P IE */
- data[0] = WLAN_EID_VENDOR_SPECIFIC;
- data[1] = len - 2;
- data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
- data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
- data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
- data[5] = WLAN_OUI_TYPE_WFA_P2P;
-
- /* NOA ATTR */
- data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
- noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
- noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
-
- noa_attr->index = noa->index;
- noa_attr->oppps_ctwindow = ctwindow;
- if (oppps)
- noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
-
- for (i = 0; i < noa_descriptors; i++) {
- noa_attr->desc[i].count =
- __le32_to_cpu(noa->descriptors[i].type_count);
- noa_attr->desc[i].duration = noa->descriptors[i].duration;
- noa_attr->desc[i].interval = noa->descriptors[i].interval;
- noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
- }
-
- attr_len = 2; /* index + oppps_ctwindow */
- attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
- *noa_attr_len = __cpu_to_le16(attr_len);
-}
-
-static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
-{
- u32 len = 0;
- u8 noa_descriptors = noa->num_descriptors;
- u8 opp_ps_info = noa->ctwindow_oppps;
- bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
-
- if (!noa_descriptors && !opps_enabled)
- return len;
-
- len += 1 + 1 + 4; /* EID + len + OUI */
- len += 1 + 2; /* noa attr + attr len */
- len += 1 + 1; /* index + oppps_ctwindow */
- len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
-
- return len;
-}
-
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
struct sk_buff *bcn,
const struct wmi_p2p_noa_info *noa)
{
- u8 *new_data, *old_data = arvif->u.ap.noa_data;
- u32 new_len;
-
if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
return;
ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
- if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
- new_len = ath10k_p2p_calc_noa_ie_len(noa);
- if (!new_len)
- goto cleanup;
- new_data = kmalloc(new_len, GFP_ATOMIC);
- if (!new_data)
- goto cleanup;
-
- ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
-
- spin_lock_bh(&ar->data_lock);
- arvif->u.ap.noa_data = new_data;
- arvif->u.ap.noa_len = new_len;
- spin_unlock_bh(&ar->data_lock);
- kfree(old_data);
- }
+ if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
+ ath10k_p2p_noa_update(arvif, noa);
if (arvif->u.ap.noa_data)
if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
memcpy(skb_put(bcn, arvif->u.ap.noa_len),
arvif->u.ap.noa_data,
arvif->u.ap.noa_len);
- return;
-cleanup:
- spin_lock_bh(&ar->data_lock);
- arvif->u.ap.noa_data = NULL;
- arvif->u.ap.noa_len = 0;
- spin_unlock_bh(&ar->data_lock);
- kfree(old_data);
+ return;
}
static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
@@ -2555,6 +2425,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
u64 tsf)
{
u32 reg0, reg1, tsf32l;
+ struct ieee80211_channel *ch;
struct pulse_event pe;
u64 tsf64;
u8 rssi, width;
@@ -2583,6 +2454,15 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
if (!ar->dfs_detector)
return;
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->rx_channel;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch) {
+ ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
+ goto radar_detected;
+ }
+
/* report event to DFS pattern detector */
tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
tsf64 = tsf & (~0xFFFFFFFFULL);
@@ -2598,10 +2478,10 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
rssi = 0;
pe.ts = tsf64;
- pe.freq = ar->hw->conf.chandef.chan->center_freq;
+ pe.freq = ch->center_freq;
pe.width = width;
pe.rssi = rssi;
-
+ pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
pe.freq, pe.width, pe.rssi, pe.ts);
@@ -2614,6 +2494,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
return;
}
+radar_detected:
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
ATH10K_DFS_STAT_INC(ar, radar_detected);
@@ -2872,7 +2753,43 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+ struct wmi_roam_ev_arg arg = {};
+ int ret;
+ u32 vdev_id;
+ u32 reason;
+ s32 rssi;
+
+ ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
+ return;
+ }
+
+ vdev_id = __le32_to_cpu(arg.vdev_id);
+ reason = __le32_to_cpu(arg.reason);
+ rssi = __le32_to_cpu(arg.rssi);
+ rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ vdev_id, reason, rssi);
+
+ if (reason >= WMI_ROAM_REASON_MAX)
+ ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+
+ switch (reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ ath10k_mac_handle_beacon_miss(ar, vdev_id);
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+ break;
+ }
}
void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
@@ -2942,7 +2859,19 @@ void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+ struct wmi_wow_ev_arg ev = {};
+ int ret;
+
+ complete(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
+ wow_reason(ev.wake_reason));
}
void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
@@ -3231,6 +3160,21 @@ static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ struct wmi_roam_ev *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+
+ return 0;
+}
+
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_rdy_ev_arg arg = {};
@@ -3989,6 +3933,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
features = WMI_10_2_RX_BATCH_MODE;
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+ features |= WMI_10_2_COEX_GPIO;
cmd->resource_config.feature_mask = __cpu_to_le32(features);
memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -4315,8 +4261,6 @@ ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
const char *cmdname;
u32 flags = 0;
- if (WARN_ON(arg->ssid && arg->ssid_len == 0))
- return ERR_PTR(-EINVAL);
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -4539,7 +4483,8 @@ ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
static struct sk_buff *
ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
{
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
@@ -5223,6 +5168,7 @@ static const struct wmi_ops wmi_ops = {
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5268,6 +5214,7 @@ static const struct wmi_ops wmi_ops = {
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
};
static const struct wmi_ops wmi_10_1_ops = {
@@ -5290,6 +5237,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5330,6 +5278,7 @@ static const struct wmi_ops wmi_10_1_ops = {
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
};
static const struct wmi_ops wmi_10_2_ops = {
@@ -5353,6 +5302,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5413,6 +5363,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5452,6 +5403,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
};
int ath10k_wmi_attach(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index adf935bf0580..cad72ae76253 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -148,6 +148,8 @@ enum wmi_service {
WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
WMI_SERVICE_MDNS_OFFLOAD,
WMI_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_SERVICE_ATF,
+ WMI_SERVICE_COEX_GPIO,
/* keep last */
WMI_SERVICE_MAX,
@@ -177,6 +179,8 @@ enum wmi_10x_service {
WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
WMI_10X_SERVICE_FORCE_FW_HANG,
WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_10X_SERVICE_ATF,
+ WMI_10X_SERVICE_COEX_GPIO,
};
enum wmi_main_service {
@@ -293,6 +297,8 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+ SVCSTR(WMI_SERVICE_ATF);
+ SVCSTR(WMI_SERVICE_COEX_GPIO);
default:
return NULL;
}
@@ -356,6 +362,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_ATF,
+ WMI_SERVICE_ATF, len);
+ SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
+ WMI_SERVICE_COEX_GPIO, len);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -552,6 +562,9 @@ struct wmi_cmd_map {
u32 gpio_output_cmdid;
u32 pdev_get_temperature_cmdid;
u32 vdev_set_wmm_params_cmdid;
+ u32 tdls_set_state_cmdid;
+ u32 tdls_peer_update_cmdid;
+ u32 adaptive_qcs_cmdid;
};
/*
@@ -1952,6 +1965,7 @@ struct wmi_resource_config_10x {
enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1),
+ WMI_10_2_COEX_GPIO = BIT(3),
};
struct wmi_resource_config_10_2 {
@@ -2166,6 +2180,7 @@ struct wmi_start_scan_arg {
u32 max_scan_time;
u32 probe_delay;
u32 scan_ctrl_flags;
+ u32 burst_duration_ms;
u32 ie_len;
u32 n_channels;
@@ -4333,6 +4348,12 @@ struct wmi_peer_create_cmd {
struct wmi_mac_addr peer_macaddr;
} __packed;
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
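/* Editor's note: with the widened .gen_peer_create op above, callers now
 * pass the peer type explicitly; a default (non-TDLS) peer would be
 * created with something like the following (wrapper usage assumed):
 *
 *	skb = ath10k_wmi_op_gen_peer_create(ar, vdev_id, peer_addr,
 *					    WMI_PEER_TYPE_DEFAULT);
 */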
struct wmi_peer_delete_cmd {
__le32 vdev_id;
struct wmi_mac_addr peer_macaddr;
@@ -4644,9 +4665,7 @@ struct wmi_peer_sta_kickout_event {
} __packed;
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
-
-/* FIXME: empirically extrapolated */
-#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
+#define WMI_CHAN_INFO_MSEC(x) ((x) / 88000)
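/* Editor's note: the new divisor reads as cycles per millisecond, i.e.
 * an 88 MHz reference clock (88,000 cycles per ms) replacing the old
 * empirically extrapolated value. A quick worked example:
 *
 *	u32 ms = WMI_CHAN_INFO_MSEC(8800000);	yields 8800000 / 88000 = 100
 */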
/* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256
@@ -4769,6 +4788,22 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+} __packed;
+
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -4857,11 +4892,200 @@ struct wmi_rdy_ev_arg {
const u8 *mac_addr;
};
+struct wmi_roam_ev_arg {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+};
+
struct wmi_pdev_temperature_event {
/* temperature value in degrees Celsius */
__le32 temperature;
} __packed;
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
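/* Editor's sketch (hypothetical caller): the C2S()-generated switch turns
 * firmware enums into readable strings for logging, e.g.
 *
 *	ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup event %s\n",
 *		   wow_wakeup_event(WOW_MAGIC_PKT_RECVD_EVENT));
 *
 * prints "WOW_MAGIC_PKT_RECVD_EVENT". Unknown values return NULL, so
 * callers should guard before handing the result to a printk.
 */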
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+ u32 vdev_id;
+ u32 flag;
+ enum wmi_wow_wake_reason wake_reason;
+ u32 data_len;
+};
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+
+enum wmi_tdls_state {
+ WMI_TDLS_DISABLE,
+ WMI_TDLS_ENABLE_PASSIVE,
+ WMI_TDLS_ENABLE_ACTIVE,
+};
+
+enum wmi_tdls_peer_state {
+ WMI_TDLS_PEER_STATE_PEERING,
+ WMI_TDLS_PEER_STATE_CONNECTED,
+ WMI_TDLS_PEER_STATE_TEARDOWN,
+};
+
+struct wmi_tdls_peer_update_cmd_arg {
+ u32 vdev_id;
+ enum wmi_tdls_peer_state peer_state;
+ u8 addr[ETH_ALEN];
+};
+
+#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
+
+struct wmi_tdls_peer_capab_arg {
+ u8 peer_uapsd_queues;
+ u8 peer_max_sp;
+ u32 buff_sta_support;
+ u32 off_chan_support;
+ u32 peer_curr_operclass;
+ u32 self_curr_operclass;
+ u32 peer_chan_len;
+ u32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ u32 is_peer_responder;
+ u32 pref_offchan_num;
+ u32 pref_offchan_bw;
+};
+
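/* Editor's sketch: a hypothetical example of filling the TDLS peer update
 * argument for a peer that has completed setup; the field and enum names
 * come from the definitions above, the values are illustrative only.
 */
static inline void example_fill_tdls_update(struct wmi_tdls_peer_update_cmd_arg *arg,
					    u32 vdev_id, const u8 *mac)
{
	arg->vdev_id = vdev_id;
	arg->peer_state = WMI_TDLS_PEER_STATE_CONNECTED;
	ether_addr_copy(arg->addr, mac);
}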
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
new file mode 100644
index 000000000000..a68d8fd853a3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+static const struct wiphy_wowlan_support ath10k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_cleanup(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret, i;
+ unsigned long wow_mask = 0;
+ struct ath10k *ar = arvif->ar;
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int pattern_id = 0;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ /* fall through */
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ int j;
+
+ if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+ continue;
+
+		/* expand the bitmask (one bit per pattern byte) into the
+		 * bytemask (0xff per selected byte) the firmware expects */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ bitmask[j] = 0xff;
+
+ ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ patterns[i].pattern,
+ bitmask,
+ patterns[i].pattern_len,
+ patterns[i].pkt_offset);
+ if (ret) {
+ ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
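/* Editor's sketch (standalone illustration, not driver code): the loop
 * above expands cfg80211's packed bitmask, one bit per pattern byte, into
 * the 0xff/0x00 bytemask the firmware expects. For instance mask[0] == 0x05
 * selects pattern bytes 0 and 2:
 */
static inline void example_expand_wow_mask(const u8 *mask, u8 *bytemask,
					   int pattern_len)
{
	int j;

	for (j = 0; j < pattern_len; j++)
		bytemask[j] = (mask[j / 8] & BIT(j % 8)) ? 0xff : 0x00;
}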
+
+static int ath10k_wow_set_wakeups(struct ath10k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_enable(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_wakeup(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_wow_cleanup(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath10k_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath10k_hif_suspend(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath10k_wow_wakeup(ar);
+
+cleanup:
+ ath10k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+int ath10k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_hif_resume(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_wakeup(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+int ath10k_wow_init(struct ath10k *ar)
+{
+ if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+ return 0;
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
+ return -EINVAL;
+
+ ar->wow.wowlan_support = ath10k_wowlan_support;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ return 0;
+}
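/* Editor's note: these hooks are meant to be wired into the driver's
 * ieee80211_ops table under CONFIG_PM (exact placement in mac.c assumed),
 * roughly:
 *
 *	#ifdef CONFIG_PM
 *		.suspend = ath10k_wow_op_suspend,
 *		.resume = ath10k_wow_op_resume,
 *	#endif
 */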
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
new file mode 100644
index 000000000000..abbb04b6d1bd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath10k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+#ifdef CONFIG_PM
+
+int ath10k_wow_init(struct ath10k *ar);
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath10k_wow_op_resume(struct ieee80211_hw *hw);
+
+#else
+
+static inline int ath10k_wow_init(struct ath10k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 7ca0d6f930fd..e22b0e778927 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1280,7 +1280,6 @@ struct ath5k_hw {
DECLARE_BITMAP(status, 4);
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
-#define ATH_STAT_PROMISC 1
#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
#define ATH_STAT_RESET 4 /* hw reset */
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index ca4b7ccd697f..803030fd17d3 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -124,7 +124,7 @@ ath5k_led_brightness_set(struct led_classdev *led_dev,
static int
ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
- const char *name, char *trigger)
+ const char *name, const char *trigger)
{
int err;
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 3b4a6463d87a..dc44cfef7517 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -369,7 +369,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *new_flags, u64 multicast)
{
#define SUPPORTED_FIF_FLAGS \
- (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \
+ (FIF_ALLMULTI | FIF_FCSFAIL | \
FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC)
@@ -393,16 +393,6 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
(AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
AR5K_RX_FILTER_MCAST);
- if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
- if (*new_flags & FIF_PROMISC_IN_BSS)
- __set_bit(ATH_STAT_PROMISC, ah->status);
- else
- __clear_bit(ATH_STAT_PROMISC, ah->status);
- }
-
- if (test_bit(ATH_STAT_PROMISC, ah->status))
- rfilt |= AR5K_RX_FILTER_PROM;
-
/* Note, AR5K_RX_FILTER_MCAST is already enabled */
if (*new_flags & FIF_ALLMULTI) {
mfilt[0] = ~0;
@@ -418,8 +408,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
rfilt |= AR5K_RX_FILTER_BEACON;
- /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
- * set we should only pass on control frames for this
+ /* FIF_CONTROL doc says we should only pass on control frames for this
* station. This needs testing. I believe right now this
 * enables *all* control frames, which is OK, but
 * we should see if we can improve on granularity */
@@ -809,7 +798,6 @@ const struct ieee80211_ops ath5k_hw_ops = {
.sw_scan_start = ath5k_sw_scan_start,
.sw_scan_complete = ath5k_sw_scan_complete,
.get_stats = ath5k_get_stats,
- /* .get_tkip_seq = not implemented */
/* .set_frag_threshold = not implemented */
/* .set_rts_threshold = not implemented */
/* .sta_add = not implemented */
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index cce4625a53ad..a511ef3614b9 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -889,7 +889,7 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
GFP_KERNEL);
} else if (vif->sme_state == SME_CONNECTED) {
cfg80211_disconnected(vif->ndev, proto_reason,
- NULL, 0, GFP_KERNEL);
+ NULL, 0, false, GFP_KERNEL);
}
vif->sme_state = SME_DISCONNECTED;
@@ -3467,7 +3467,7 @@ void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
GFP_KERNEL);
break;
case SME_CONNECTED:
- cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(vif->ndev, 0, NULL, 0, true, GFP_KERNEL);
break;
}
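/* Editor's note: the extra bool is cfg80211_disconnected()'s new
 * "locally_generated" argument; passing true in the ath6kl_cfg80211_stop()
 * path marks the disconnect as initiated by the local device rather than
 * by the AP, while the event path above keeps reporting false.
 */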
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 6c23d279525f..8f8793004b9f 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -254,86 +254,25 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
return 0;
}
-/**
- * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For non single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and compute register settings below.
- */
-static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
- struct ath9k_channel *chan)
+void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan, int bin)
{
- int bb_spur = AR_NO_SPUR;
- int bin, cur_bin;
- int spur_freq_sd;
- int spur_delta_phase;
- int denominator;
+ int cur_bin;
int upper, lower, cur_vit_mask;
- int tmp, new;
int i;
- static int pilot_mask_reg[4] = {
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ static const int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
- static int chan_mask_reg[4] = {
+ static const int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
- static int inc[4] = { 0, 100, 0, 0 };
-
- int8_t mask_m[123];
- int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
- int cur_bb_spur;
- bool is2GHz = IS_CHAN_2GHZ(chan);
-
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
- if (AR_NO_SPUR == cur_bb_spur)
- break;
- cur_bb_spur = cur_bb_spur - (chan->channel * 10);
- if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
- bb_spur = cur_bb_spur;
- break;
- }
- }
-
- if (AR_NO_SPUR == bb_spur)
- return;
-
- bin = bb_spur * 32;
-
- tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
- new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
- AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
- AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
- AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
-
- REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
-
- new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
- AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
- AR_PHY_SPUR_REG_MASK_RATE_SELECT |
- AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
- SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
- REG_WRITE(ah, AR_PHY_SPUR_REG, new);
-
- spur_delta_phase = ((bb_spur * 524288) / 100) &
- AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
- denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
- spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
-
- new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
- SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
- SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
- REG_WRITE(ah, AR_PHY_TIMING11, new);
+ static const int inc[4] = { 0, 100, 0, 0 };
cur_bin = -6000;
upper = bin + 100;
@@ -343,6 +282,7 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
int pilot_mask = 0;
int chan_mask = 0;
int bp = 0;
+
for (bp = 0; bp < 30; bp++) {
if ((cur_bin > lower) && (cur_bin < upper)) {
pilot_mask = pilot_mask | 0x1 << bp;
@@ -361,7 +301,6 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
@@ -467,6 +406,78 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
}
/**
+ * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan: channel to operate on
+ *
+ * For non-single-chip solutions. Converts to the baseband spur frequency
+ * given the input channel frequency and computes the register settings below.
+ */
+static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int bin;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int tmp, new;
+ int i;
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+ if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur)
+ return;
+
+ bin = bb_spur * 32;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+ new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+ new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+ spur_delta_phase = ((bb_spur * 524288) / 100) &
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+ spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+ new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+ ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
+}
+
+/**
* ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
* @ah: atheros hardware structure
*
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index fc08162b5820..db6624527d99 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -169,29 +169,17 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
{
int bb_spur = AR_NO_SPUR;
int freq;
- int bin, cur_bin;
+ int bin;
int bb_spur_off, spur_subchannel_sd;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
- int upper, lower, cur_vit_mask;
int tmp, newVal;
int i;
- static const int pilot_mask_reg[4] = {
- AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
- };
- static const int chan_mask_reg[4] = {
- AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
- };
- static const int inc[4] = { 0, 100, 0, 0 };
struct chan_centers centers;
int8_t mask_m[123];
int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
@@ -288,135 +276,7 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
- cur_bin = -6000;
- upper = bin + 100;
- lower = bin - 100;
-
- for (i = 0; i < 4; i++) {
- int pilot_mask = 0;
- int chan_mask = 0;
- int bp = 0;
- for (bp = 0; bp < 30; bp++) {
- if ((cur_bin > lower) && (cur_bin < upper)) {
- pilot_mask = pilot_mask | 0x1 << bp;
- chan_mask = chan_mask | 0x1 << bp;
- }
- cur_bin += 100;
- }
- cur_bin += inc[i];
- REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
- REG_WRITE(ah, chan_mask_reg[i], chan_mask);
- }
-
- cur_vit_mask = 6100;
- upper = bin + 120;
- lower = bin - 120;
-
- for (i = 0; i < 123; i++) {
- if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
- /* workaround for gcc bug #37014 */
- volatile int tmp_v = abs(cur_vit_mask - bin);
-
- if (tmp_v < 75)
- mask_amt = 1;
- else
- mask_amt = 0;
- if (cur_vit_mask < 0)
- mask_m[abs(cur_vit_mask / 100)] = mask_amt;
- else
- mask_p[cur_vit_mask / 100] = mask_amt;
- }
- cur_vit_mask -= 100;
- }
-
- tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
- | (mask_m[48] << 26) | (mask_m[49] << 24)
- | (mask_m[50] << 22) | (mask_m[51] << 20)
- | (mask_m[52] << 18) | (mask_m[53] << 16)
- | (mask_m[54] << 14) | (mask_m[55] << 12)
- | (mask_m[56] << 10) | (mask_m[57] << 8)
- | (mask_m[58] << 6) | (mask_m[59] << 4)
- | (mask_m[60] << 2) | (mask_m[61] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
- tmp_mask = (mask_m[31] << 28)
- | (mask_m[32] << 26) | (mask_m[33] << 24)
- | (mask_m[34] << 22) | (mask_m[35] << 20)
- | (mask_m[36] << 18) | (mask_m[37] << 16)
- | (mask_m[48] << 14) | (mask_m[39] << 12)
- | (mask_m[40] << 10) | (mask_m[41] << 8)
- | (mask_m[42] << 6) | (mask_m[43] << 4)
- | (mask_m[44] << 2) | (mask_m[45] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
- tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
- | (mask_m[18] << 26) | (mask_m[18] << 24)
- | (mask_m[20] << 22) | (mask_m[20] << 20)
- | (mask_m[22] << 18) | (mask_m[22] << 16)
- | (mask_m[24] << 14) | (mask_m[24] << 12)
- | (mask_m[25] << 10) | (mask_m[26] << 8)
- | (mask_m[27] << 6) | (mask_m[28] << 4)
- | (mask_m[29] << 2) | (mask_m[30] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
- tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
- | (mask_m[2] << 26) | (mask_m[3] << 24)
- | (mask_m[4] << 22) | (mask_m[5] << 20)
- | (mask_m[6] << 18) | (mask_m[7] << 16)
- | (mask_m[8] << 14) | (mask_m[9] << 12)
- | (mask_m[10] << 10) | (mask_m[11] << 8)
- | (mask_m[12] << 6) | (mask_m[13] << 4)
- | (mask_m[14] << 2) | (mask_m[15] << 0);
- REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
- tmp_mask = (mask_p[15] << 28)
- | (mask_p[14] << 26) | (mask_p[13] << 24)
- | (mask_p[12] << 22) | (mask_p[11] << 20)
- | (mask_p[10] << 18) | (mask_p[9] << 16)
- | (mask_p[8] << 14) | (mask_p[7] << 12)
- | (mask_p[6] << 10) | (mask_p[5] << 8)
- | (mask_p[4] << 6) | (mask_p[3] << 4)
- | (mask_p[2] << 2) | (mask_p[1] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
- tmp_mask = (mask_p[30] << 28)
- | (mask_p[29] << 26) | (mask_p[28] << 24)
- | (mask_p[27] << 22) | (mask_p[26] << 20)
- | (mask_p[25] << 18) | (mask_p[24] << 16)
- | (mask_p[23] << 14) | (mask_p[22] << 12)
- | (mask_p[21] << 10) | (mask_p[20] << 8)
- | (mask_p[19] << 6) | (mask_p[18] << 4)
- | (mask_p[17] << 2) | (mask_p[16] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
- tmp_mask = (mask_p[45] << 28)
- | (mask_p[44] << 26) | (mask_p[43] << 24)
- | (mask_p[42] << 22) | (mask_p[41] << 20)
- | (mask_p[40] << 18) | (mask_p[39] << 16)
- | (mask_p[38] << 14) | (mask_p[37] << 12)
- | (mask_p[36] << 10) | (mask_p[35] << 8)
- | (mask_p[34] << 6) | (mask_p[33] << 4)
- | (mask_p[32] << 2) | (mask_p[31] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
- tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
- | (mask_p[59] << 26) | (mask_p[58] << 24)
- | (mask_p[57] << 22) | (mask_p[56] << 20)
- | (mask_p[55] << 18) | (mask_p[54] << 16)
- | (mask_p[53] << 14) | (mask_p[52] << 12)
- | (mask_p[51] << 10) | (mask_p[50] << 8)
- | (mask_p[49] << 6) | (mask_p[48] << 4)
- | (mask_p[47] << 2) | (mask_p[46] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+ ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
REGWRITE_BUFFER_FLUSH(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 5cee231cca1f..a8762711ad74 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -15,6 +15,7 @@
*/
#include <linux/relay.h>
+#include <linux/random.h>
#include "ath9k.h"
static s8 fix_rssi_inv_only(u8 rssi_val)
@@ -36,21 +37,480 @@ static void ath_debug_send_fft_sample(struct ath_spec_scan_priv *spec_priv,
relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length);
}
+typedef int (ath_cmn_fft_idx_validator) (u8 *sample_end, int bytes_read);
+
+static int
+ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
+{
+ struct ath_ht20_mag_info *mag_info;
+ u8 *sample;
+ u16 max_magnitude;
+ u8 max_index;
+ u8 max_exp;
+
+ /* Sanity check so that we don't read outside the read
+ * buffer
+ */
+ if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN - 1)
+ return -1;
+
+ mag_info = (struct ath_ht20_mag_info *) (sample_end -
+ sizeof(struct ath_ht20_mag_info) + 1);
+
+ sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
+
+ max_index = spectral_max_index(mag_info->all_bins,
+ SPECTRAL_HT20_NUM_BINS);
+ max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+
+ max_exp = mag_info->max_exp & 0xf;
+
+ /* Don't try to read something outside the read buffer
+ * in case of a missing byte (so bins[0] will be outside
+ * the read buffer)
+ */
+ if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
+ return -1;
+
+ if (sample[max_index] != (max_magnitude >> max_exp))
+ return -1;
+ else
+ return 0;
+}
+
+static int
+ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
+{
+ struct ath_ht20_40_mag_info *mag_info;
+ u8 *sample;
+ u16 lower_mag, upper_mag;
+ u8 lower_max_index, upper_max_index;
+ u8 max_exp;
+ int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
+
+ /* Sanity check so that we don't read outside the read
+ * buffer
+ */
+ if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN - 1)
+ return -1;
+
+ mag_info = (struct ath_ht20_40_mag_info *) (sample_end -
+ sizeof(struct ath_ht20_40_mag_info) + 1);
+
+ sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
+
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ lower_max_index = spectral_max_index(mag_info->lower_bins,
+ SPECTRAL_HT20_40_NUM_BINS);
+
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ upper_max_index = spectral_max_index(mag_info->upper_bins,
+ SPECTRAL_HT20_40_NUM_BINS);
+
+ max_exp = mag_info->max_exp & 0xf;
+
+ /* Don't try to read something outside the read buffer
+ * in case of a missing byte (so bins[0] will be outside
+ * the read buffer)
+ */
+ if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN &&
+ ((upper_max_index < 1) || (lower_max_index < 1)))
+ return -1;
+
+	/* Sometimes the hardware messes up the index and adds
+	 * the index of the middle point (dc_pos). Try to fix it.
+	 */
+ if ((upper_max_index - dc_pos > 0) &&
+ (sample[upper_max_index] == (upper_mag >> max_exp)))
+ upper_max_index -= dc_pos;
+
+ if ((lower_max_index - dc_pos > 0) &&
+ (sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
+ lower_max_index -= dc_pos;
+
+ if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
+ (sample[lower_max_index] != (lower_mag >> max_exp)))
+ return -1;
+ else
+ return 0;
+}
+
+typedef int (ath_cmn_fft_sample_handler) (struct ath_rx_status *rs,
+ struct ath_spec_scan_priv *spec_priv,
+ u8 *sample_buf, u64 tsf, u16 freq, int chan_type);
+
+static int
+ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
+ struct ath_spec_scan_priv *spec_priv,
+ u8 *sample_buf,
+ u64 tsf, u16 freq, int chan_type)
+{
+ struct fft_sample_ht20 fft_sample_20;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+ struct ath_hw *ah = spec_priv->ah;
+ struct ath_ht20_mag_info *mag_info;
+ struct fft_sample_tlv *tlv;
+ int i = 0;
+ int ret = 0;
+ int dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+ u16 magnitude, tmp_mag, length;
+ u8 max_index, bitmap_w, max_exp;
+
+ length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+ fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+ fft_sample_20.tlv.length = __cpu_to_be16(length);
+ fft_sample_20.freq = __cpu_to_be16(freq);
+ fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ fft_sample_20.noise = ah->noise;
+
+ mag_info = (struct ath_ht20_mag_info *) (sample_buf +
+ SPECTRAL_HT20_NUM_BINS);
+
+ magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+
+ max_index = spectral_max_index(mag_info->all_bins,
+ SPECTRAL_HT20_NUM_BINS);
+ fft_sample_20.max_index = max_index;
+
+ bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample_20.bitmap_weight = bitmap_w;
+
+ max_exp = mag_info->max_exp & 0xf;
+ fft_sample_20.max_exp = max_exp;
+
+ fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+ memcpy(fft_sample_20.data, sample_buf, SPECTRAL_HT20_NUM_BINS);
+
+ ath_dbg(common, SPECTRAL_SCAN, "FFT HT20 frame: max mag 0x%X,"
+ "max_mag_idx %i\n",
+ magnitude >> max_exp,
+ max_index);
+
+ if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
+ ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
+ ret = -1;
+ }
+
+	/* The DC value (the value in the middle) is the blind spot of the
+	 * spectral sample and is invalid; interpolate it.
+	 */
+ fft_sample_20.data[dc_pos] = (fft_sample_20.data[dc_pos + 1] +
+ fft_sample_20.data[dc_pos - 1]) / 2;
+
+	/* Check that the maximum magnitude is indeed the maximum; if the
+	 * maximum was at dc_pos, calculate a new one (since the value at
+	 * dc_pos is invalid).
+	 */
+ if (max_index == dc_pos) {
+ tmp_mag = 0;
+ for (i = 0; i < dc_pos; i++) {
+ if (fft_sample_20.data[i] > tmp_mag) {
+ tmp_mag = fft_sample_20.data[i];
+ fft_sample_20.max_index = i;
+ }
+ }
+
+ magnitude = tmp_mag << max_exp;
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Calculated new lower max 0x%X at %i\n",
+ tmp_mag, fft_sample_20.max_index);
+ } else
+ for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++) {
+ if (fft_sample_20.data[i] == (magnitude >> max_exp))
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got max: 0x%X at index %i\n",
+ fft_sample_20.data[i], i);
+
+ if (fft_sample_20.data[i] > (magnitude >> max_exp)) {
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got bin %i greater than max: 0x%X\n",
+ i, fft_sample_20.data[i]);
+ ret = -1;
+ }
+ }
+
+ if (ret < 0)
+ return ret;
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_20;
+
+ ath_debug_send_fft_sample(spec_priv, tlv);
+
+ return 0;
+}
+
+static int
+ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
+ struct ath_spec_scan_priv *spec_priv,
+ u8 *sample_buf,
+ u64 tsf, u16 freq, int chan_type)
+{
+ struct fft_sample_ht20_40 fft_sample_40;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+ struct ath_hw *ah = spec_priv->ah;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_ht20_40_mag_info *mag_info;
+ struct fft_sample_tlv *tlv;
+ int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
+ int i = 0;
+ int ret = 0;
+ s16 ext_nf;
+ u16 lower_mag, upper_mag, tmp_mag, length;
+ s8 lower_rssi, upper_rssi;
+ u8 lower_max_index, upper_max_index;
+ u8 lower_bitmap_w, upper_bitmap_w, max_exp;
+
+ if (caldata)
+ ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+ caldata->nfCalHist[3].privNF);
+ else
+ ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+ length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+ fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+ fft_sample_40.tlv.length = __cpu_to_be16(length);
+ fft_sample_40.freq = __cpu_to_be16(freq);
+ fft_sample_40.channel_type = chan_type;
+
+ if (chan_type == NL80211_CHAN_HT40PLUS) {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+
+ fft_sample_40.lower_noise = ah->noise;
+ fft_sample_40.upper_noise = ext_nf;
+ } else {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+
+ fft_sample_40.lower_noise = ext_nf;
+ fft_sample_40.upper_noise = ah->noise;
+ }
+
+ fft_sample_40.lower_rssi = lower_rssi;
+ fft_sample_40.upper_rssi = upper_rssi;
+
+ mag_info = (struct ath_ht20_40_mag_info *) (sample_buf +
+ SPECTRAL_HT20_40_NUM_BINS);
+
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+
+ lower_max_index = spectral_max_index(mag_info->lower_bins,
+ SPECTRAL_HT20_40_NUM_BINS);
+ fft_sample_40.lower_max_index = lower_max_index;
+
+ upper_max_index = spectral_max_index(mag_info->upper_bins,
+ SPECTRAL_HT20_40_NUM_BINS);
+ fft_sample_40.upper_max_index = upper_max_index;
+
+ lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+ fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+
+ upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+ fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+
+ max_exp = mag_info->max_exp & 0xf;
+ fft_sample_40.max_exp = max_exp;
+
+ fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+ memcpy(fft_sample_40.data, sample_buf, SPECTRAL_HT20_40_NUM_BINS);
+
+ ath_dbg(common, SPECTRAL_SCAN, "FFT HT20/40 frame: lower mag 0x%X,"
+ "lower_mag_idx %i, upper mag 0x%X,"
+ "upper_mag_idx %i\n",
+ lower_mag >> max_exp,
+ lower_max_index,
+ upper_mag >> max_exp,
+ upper_max_index);
+
+	/* Sometimes the hardware messes up the index and adds
+	 * the index of the middle point (dc_pos). Try to fix it.
+	 */
+ if ((upper_max_index - dc_pos > 0) &&
+ (fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
+ upper_max_index -= dc_pos;
+ fft_sample_40.upper_max_index = upper_max_index;
+ }
+
+ if ((lower_max_index - dc_pos > 0) &&
+ (fft_sample_40.data[lower_max_index - dc_pos] ==
+ (lower_mag >> max_exp))) {
+ lower_max_index -= dc_pos;
+ fft_sample_40.lower_max_index = lower_max_index;
+ }
+
+ /* Check if we got the expected magnitude values at
+ * the expected bins
+ */
+ if ((fft_sample_40.data[upper_max_index + dc_pos]
+ != (upper_mag >> max_exp)) ||
+ (fft_sample_40.data[lower_max_index]
+ != (lower_mag >> max_exp))) {
+ ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
+ ret = -1;
+ }
+
+	/* The DC value (the value in the middle) is the blind spot of the
+	 * spectral sample and is invalid; interpolate it.
+	 */
+ fft_sample_40.data[dc_pos] = (fft_sample_40.data[dc_pos + 1] +
+ fft_sample_40.data[dc_pos - 1]) / 2;
+
+	/* Check that the maximum magnitudes are indeed the maxima; if a
+	 * maximum was at dc_pos, calculate a new one (since the value at
+	 * dc_pos is invalid).
+	 */
+ if (lower_max_index == dc_pos) {
+ tmp_mag = 0;
+ for (i = 0; i < dc_pos; i++) {
+ if (fft_sample_40.data[i] > tmp_mag) {
+ tmp_mag = fft_sample_40.data[i];
+ fft_sample_40.lower_max_index = i;
+ }
+ }
+
+ lower_mag = tmp_mag << max_exp;
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Calculated new lower max 0x%X at %i\n",
+ tmp_mag, fft_sample_40.lower_max_index);
+ } else
+ for (i = 0; i < dc_pos; i++) {
+ if (fft_sample_40.data[i] == (lower_mag >> max_exp))
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got lower mag: 0x%X at index %i\n",
+ fft_sample_40.data[i], i);
+
+ if (fft_sample_40.data[i] > (lower_mag >> max_exp)) {
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got lower bin %i higher than max: 0x%X\n",
+ i, fft_sample_40.data[i]);
+ ret = -1;
+ }
+ }
+
+ if (upper_max_index == dc_pos) {
+ tmp_mag = 0;
+ for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
+ if (fft_sample_40.data[i] > tmp_mag) {
+ tmp_mag = fft_sample_40.data[i];
+ fft_sample_40.upper_max_index = i;
+ }
+ }
+ upper_mag = tmp_mag << max_exp;
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Calculated new upper max 0x%X at %i\n",
+			tmp_mag, fft_sample_40.upper_max_index);
+ } else
+ for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
+ if (fft_sample_40.data[i] == (upper_mag >> max_exp))
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got upper mag: 0x%X at index %i\n",
+ fft_sample_40.data[i], i);
+
+ if (fft_sample_40.data[i] > (upper_mag >> max_exp)) {
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got upper bin %i higher than max: 0x%X\n",
+ i, fft_sample_40.data[i]);
+
+ ret = -1;
+ }
+ }
+
+ if (ret < 0)
+ return ret;
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_40;
+
+ ath_debug_send_fft_sample(spec_priv, tlv);
+
+ return 0;
+}
+
+static inline void
+ath_cmn_copy_fft_frame(u8 *in, u8 *out, int sample_len, int sample_bytes)
+{
+ switch (sample_bytes - sample_len) {
+ case -1:
+ /* First byte missing */
+ memcpy(&out[1], in,
+ sample_len - 1);
+ break;
+ case 0:
+ /* Length correct, nothing to do. */
+ memcpy(out, in, sample_len);
+ break;
+ case 1:
+ /* MAC added 2 extra bytes AND first byte
+ * is missing.
+ */
+ memcpy(&out[1], in, 30);
+ out[31] = in[31];
+ memcpy(&out[32], &in[33],
+ sample_len - 32);
+ break;
+ case 2:
+ /* MAC added 2 extra bytes at bin 30 and 32,
+ * remove them.
+ */
+ memcpy(out, in, 30);
+ out[30] = in[31];
+ memcpy(&out[31], &in[33],
+ sample_len - 31);
+ break;
+ default:
+ break;
+ }
+}
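/* Editor's note: the switch above mirrors the known MAC quirks on
 * sample_bytes - sample_len: -1 means the first byte went missing, +2
 * means two duplicate bytes were inserted around bins 30/32, and +1
 * means both happened at once; case 0 is a clean frame.
 */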
+
+static int
+ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
+{
+ int i = 0;
+ int ret = 0;
+ struct rchan *rc = spec_priv->rfs_chan_spec_scan;
+
+ for_each_online_cpu(i)
+ ret += relay_buf_full(rc->buf[i]);
+
+ i = num_online_cpus();
+
+ if (ret == i)
+ return 1;
+ else
+ return 0;
+}
+
/* returns 1 if this was a spectral frame, even if not handled. */
int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
struct ath_rx_status *rs, u64 tsf)
{
+ u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
struct ath_hw *ah = spec_priv->ah;
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
- u8 num_bins, *bins, *vdata = (u8 *)hdr;
- struct fft_sample_ht20 fft_sample_20;
- struct fft_sample_ht20_40 fft_sample_40;
- struct fft_sample_tlv *tlv;
+ u8 num_bins, *vdata = (u8 *)hdr;
struct ath_radar_info *radar_info;
int len = rs->rs_datalen;
- int dc_pos;
- u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+ int i;
+ int got_slen = 0;
+ u8 *sample_start;
+ int sample_bytes = 0;
+ int ret = 0;
+ u16 fft_len, sample_len, freq = ah->curchan->chan->center_freq;
enum nl80211_channel_type chan_type;
+ ath_cmn_fft_idx_validator *fft_idx_validator;
+ ath_cmn_fft_sample_handler *fft_handler;
/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
* via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -68,140 +528,170 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
+ /* Output buffers are full, no need to process anything
+ * since there is no space to put the result anyway
+ */
+ ret = ath_cmn_is_fft_buf_full(spec_priv);
+ if (ret == 1) {
+ ath_dbg(common, SPECTRAL_SCAN, "FFT report ignored, no space "
+ "left on output buffers\n");
+ return 1;
+ }
+
chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef);
if ((chan_type == NL80211_CHAN_HT40MINUS) ||
(chan_type == NL80211_CHAN_HT40PLUS)) {
fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+ sample_len = SPECTRAL_HT20_40_SAMPLE_LEN;
num_bins = SPECTRAL_HT20_40_NUM_BINS;
- bins = (u8 *)fft_sample_40.data;
+ fft_idx_validator = &ath_cmn_max_idx_verify_ht20_40_fft;
+ fft_handler = &ath_cmn_process_ht20_40_fft;
} else {
fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+ sample_len = SPECTRAL_HT20_SAMPLE_LEN;
num_bins = SPECTRAL_HT20_NUM_BINS;
- bins = (u8 *)fft_sample_20.data;
- }
-
- /* Variation in the data length is possible and will be fixed later */
- if ((len > fft_len + 2) || (len < fft_len - 1))
- return 1;
-
- switch (len - fft_len) {
- case 0:
- /* length correct, nothing to do. */
- memcpy(bins, vdata, num_bins);
- break;
- case -1:
- /* first byte missing, duplicate it. */
- memcpy(&bins[1], vdata, num_bins - 1);
- bins[0] = vdata[0];
- break;
- case 2:
- /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
- memcpy(bins, vdata, 30);
- bins[30] = vdata[31];
- memcpy(&bins[31], &vdata[33], num_bins - 31);
- break;
- case 1:
- /* MAC added 2 extra bytes AND first byte is missing. */
- bins[0] = vdata[0];
- memcpy(&bins[1], vdata, 30);
- bins[31] = vdata[31];
- memcpy(&bins[32], &vdata[33], num_bins - 32);
- break;
- default:
- return 1;
+		fft_idx_validator = &ath_cmn_max_idx_verify_ht20_fft;
+ fft_handler = &ath_cmn_process_ht20_fft;
}
- /* DC value (value in the middle) is the blind spot of the spectral
- * sample and invalid, interpolate it.
- */
- dc_pos = num_bins / 2;
- bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
-
- if ((chan_type == NL80211_CHAN_HT40MINUS) ||
- (chan_type == NL80211_CHAN_HT40PLUS)) {
- s8 lower_rssi, upper_rssi;
- s16 ext_nf;
- u8 lower_max_index, upper_max_index;
- u8 lower_bitmap_w, upper_bitmap_w;
- u16 lower_mag, upper_mag;
- struct ath9k_hw_cal_data *caldata = ah->caldata;
- struct ath_ht20_40_mag_info *mag_info;
-
- if (caldata)
- ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
- caldata->nfCalHist[3].privNF);
- else
- ext_nf = ATH_DEFAULT_NOISE_FLOOR;
-
- length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
- fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
- fft_sample_40.tlv.length = __cpu_to_be16(length);
- fft_sample_40.freq = __cpu_to_be16(freq);
- fft_sample_40.channel_type = chan_type;
-
- if (chan_type == NL80211_CHAN_HT40PLUS) {
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
-
- fft_sample_40.lower_noise = ah->noise;
- fft_sample_40.upper_noise = ext_nf;
- } else {
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
-
- fft_sample_40.lower_noise = ext_nf;
- fft_sample_40.upper_noise = ah->noise;
+ ath_dbg(common, SPECTRAL_SCAN, "Got radar dump bw_info: 0x%X,"
+ "len: %i fft_len: %i\n",
+ radar_info->pulse_bw_info,
+ len,
+ fft_len);
+ sample_start = vdata;
+ for (i = 0; i < len - 2; i++) {
+ sample_bytes++;
+
+		/* Only a single sample was received; no need to look
+		 * for the sample's end, do the correction based
+		 * on the packet's length instead. Note that the hw
+		 * will always put the radar_info structure at
+		 * the end.
+		 */
+ if (len <= fft_len + 2) {
+ sample_bytes = len - sizeof(struct ath_radar_info);
+ got_slen = 1;
}
- fft_sample_40.lower_rssi = lower_rssi;
- fft_sample_40.upper_rssi = upper_rssi;
-
- mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
- lower_mag = spectral_max_magnitude(mag_info->lower_bins);
- upper_mag = spectral_max_magnitude(mag_info->upper_bins);
- fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
- fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
- lower_max_index = spectral_max_index(mag_info->lower_bins);
- upper_max_index = spectral_max_index(mag_info->upper_bins);
- fft_sample_40.lower_max_index = lower_max_index;
- fft_sample_40.upper_max_index = upper_max_index;
- lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
- upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
- fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
- fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
- fft_sample_40.max_exp = mag_info->max_exp & 0xf;
- fft_sample_40.tsf = __cpu_to_be64(tsf);
-
- tlv = (struct fft_sample_tlv *)&fft_sample_40;
- } else {
- u8 max_index, bitmap_w;
- u16 magnitude;
- struct ath_ht20_mag_info *mag_info;
-
- length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
- fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
- fft_sample_20.tlv.length = __cpu_to_be16(length);
- fft_sample_20.freq = __cpu_to_be16(freq);
-
- fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
- fft_sample_20.noise = ah->noise;
-
- mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
- magnitude = spectral_max_magnitude(mag_info->all_bins);
- fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
- max_index = spectral_max_index(mag_info->all_bins);
- fft_sample_20.max_index = max_index;
- bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
- fft_sample_20.bitmap_weight = bitmap_w;
- fft_sample_20.max_exp = mag_info->max_exp & 0xf;
-
- fft_sample_20.tsf = __cpu_to_be64(tsf);
+		/* Search for the end of the FFT frame between
+		 * sample_len - 1 and sample_len + 2. max_exp is 3
+		 * bits long and it's the only value on the last
+		 * byte of the frame, so since it'll be smaller than
+		 * the next byte (the first bin of the next sample)
+		 * 90% of the time, we can use it as a separator.
+		 */
+ if (vdata[i] <= 0x7 && sample_bytes >= sample_len - 1) {
+
+			/* Got a frame length within boundaries; there are
+			 * four scenarios here:
+ *
+ * a) sample_len -> We got the correct length
+ * b) sample_len + 2 -> 2 bytes added around bin[31]
+ * c) sample_len - 1 -> The first byte is missing
+ * d) sample_len + 1 -> b + c at the same time
+ *
+ * When MAC adds 2 extra bytes, bin[31] and bin[32]
+ * have the same value, so we can use that for further
+ * verification in cases b and d.
+ */
+
+			/* Did we go too far? If so, we couldn't determine
+			 * this sample's boundaries; discard any further
+			 * data.
+			 */
+ if ((sample_bytes > sample_len + 2) ||
+ ((sample_bytes > sample_len) &&
+ (sample_start[31] != sample_start[32])))
+ break;
+
+			/* See if we got a valid frame by checking the
+			 * consistency of the mag_info fields. This is to
+			 * prevent us from "fixing" a correct frame.
+			 * Failure is non-fatal; later frames may
+			 * be valid.
+			 */
+ if (!fft_idx_validator(&vdata[i], i)) {
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Found valid fft frame at %i\n", i);
+ got_slen = 1;
+ }
+
+ /* We expect 1 - 2 more bytes */
+ else if ((sample_start[31] == sample_start[32]) &&
+ (sample_bytes >= sample_len) &&
+ (sample_bytes < sample_len + 2) &&
+ (vdata[i + 1] <= 0x7))
+ continue;
+
+ /* Try to distinguish cases a and c */
+ else if ((sample_bytes == sample_len - 1) &&
+ (vdata[i + 1] <= 0x7))
+ continue;
+
+ got_slen = 1;
+ }
- tlv = (struct fft_sample_tlv *)&fft_sample_20;
+ if (got_slen) {
+ ath_dbg(common, SPECTRAL_SCAN, "FFT frame len: %i\n",
+ sample_bytes);
+
+ /* Only try to fix a frame if it's the only one
+ * on the report, else just skip it.
+ */
+ if (sample_bytes != sample_len && len <= fft_len + 2) {
+ ath_cmn_copy_fft_frame(sample_start,
+ sample_buf, sample_len,
+ sample_bytes);
+
+ fft_handler(rs, spec_priv, sample_buf,
+ tsf, freq, chan_type);
+
+				/* Mix the received bins into the /dev/random
+				 * pool before clearing the buffer, so that
+				 * real sample data (not zeroes) feeds it.
+				 */
+				add_device_randomness(sample_buf, num_bins);
+
+				memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
+ }
+
+ /* Process a normal frame */
+ if (sample_bytes == sample_len) {
+ ret = fft_handler(rs, spec_priv, sample_start,
+ tsf, freq, chan_type);
+
+				/* Mix the received bins into the /dev/random
+				 * pool
+				 */
+ add_device_randomness(sample_start, num_bins);
+ }
+
+ /* Short report processed, break out of the
+ * loop.
+ */
+ if (len <= fft_len + 2)
+ break;
+
+ sample_start = &vdata[i + 1];
+
+			/* -1 to grab sample_len - 1, -2 since
+			 * they'll get increased by one. In case
+			 * of failure, try to recover by going byte
+			 * by byte instead.
+			 */
+ if (ret == 0) {
+ i += num_bins - 2;
+ sample_bytes = num_bins - 2;
+ }
+ got_slen = 0;
+ }
}
- ath_debug_send_fft_sample(spec_priv, tlv);
-
+ i -= num_bins - 2;
+ if (len - i != sizeof(struct ath_radar_info))
+ ath_dbg(common, SPECTRAL_SCAN, "FFT report truncated"
+ "(bytes left: %i)\n",
+ len - i);
return 1;
}
EXPORT_SYMBOL(ath_cmn_process_fft);
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 82d9dd29652c..998743be9c67 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -66,6 +66,8 @@ struct ath_ht20_fft_packet {
} __packed;
#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
+#define SPECTRAL_HT20_SAMPLE_LEN (sizeof(struct ath_ht20_mag_info) +\
+ SPECTRAL_HT20_NUM_BINS)
/* Dynamic 20/40 mode:
*
@@ -101,6 +103,10 @@ struct ath_spec_scan_priv {
};
#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
+#define SPECTRAL_HT20_40_SAMPLE_LEN (sizeof(struct ath_ht20_40_mag_info) +\
+ SPECTRAL_HT20_40_NUM_BINS)
+
+#define SPECTRAL_SAMPLE_MAX_LEN SPECTRAL_HT20_40_SAMPLE_LEN
/* grabs the max magnitude from the all/upper/lower bins */
static inline u16 spectral_max_magnitude(u8 *bins)
@@ -111,17 +117,32 @@ static inline u16 spectral_max_magnitude(u8 *bins)
}
/* return the index of the max magnitude from the all/upper/lower bins */
-static inline u8 spectral_max_index(u8 *bins)
+static inline u8 spectral_max_index(u8 *bins, int num_bins)
{
s8 m = (bins[2] & 0xfc) >> 2;
-
- /* TODO: this still doesn't always report the right values ... */
- if (m > 32)
+ u8 zero_idx = num_bins / 2;
+
+ /* It's a 5 bit signed int; clear the sign bit and
+ * sign-extend the value to a signed 8 bit int
+ */
+ if (m & 0x20) {
+ m &= ~0x20;
m |= 0xe0;
- else
- m &= ~0xe0;
+ }
+
+ /* Bring the zero point to the beginning
+ * instead of the middle so that we can use
+ * it for array lookup and that we don't deal
+ * with negative values later
+ */
+ m += zero_idx;
+
+ /* Sanity check to make sure index is within bounds */
+ if (m < 0 || m > num_bins - 1)
+ m = 0;
- return m + 29;
+ return m;
}
/* return the bitmap weight from the all/upper/lower bins */
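The reworked spectral_max_index() does two things: sign-extends a 5-bit
field and re-centers the result for array lookup. A small userspace sketch
with illustrative values (the clamp of out-of-range results to 0 matches
the sanity check above):

#include <stdio.h>

/* Model of the new spectral_max_index() arithmetic: bins[2] carries a
 * 5-bit signed index in bits 7:2, relative to the center bin.
 */
static int max_index(unsigned char raw, int num_bins)
{
	signed char m = (raw & 0xfc) >> 2;	/* extract the 5-bit field */

	if (m & 0x20) {		/* sign bit set: sign-extend to 8 bits */
		m &= ~0x20;
		m |= 0xe0;
	}

	m += num_bins / 2;	/* shift zero point to the array start */

	return (m < 0 || m > num_bins - 1) ? 0 : m;
}

int main(void)
{
	/* HT20 uses 56 bins: 0x84 -> field 33 -> -31 -> -31 + 28 = -3,
	 * clamped to 0; 0x10 -> field 4 -> 4 + 28 = 32.
	 */
	printf("%d %d\n", max_index(0x84, 56), max_index(0x10, 56));
	return 0;
}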
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index e82a0d4ce23f..5dbc617ecf8a 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
-#define OP_BT_PRIORITY_DETECTED BIT(3)
-#define OP_BT_SCAN BIT(4)
-#define OP_TSF_RESET BIT(6)
+#define OP_BT_PRIORITY_DETECTED 3
+#define OP_BT_SCAN 4
+#define OP_TSF_RESET 6
enum htc_op_flags {
HTC_FWFLAG_NO_RMW,
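Dropping the BIT() wrappers turns these OP_* constants into bit numbers,
which only stays correct if every user treats them as such, e.g. with the
set_bit()/test_bit() family. A hedged sketch of that usage, assuming the
driver keeps the flags in an unsigned long word (the field name is an
assumption, not taken from the patch):

#include <linux/bitops.h>

/* Sketch only: with OP_* as bit numbers, the flags are manipulated
 * with atomic bitops rather than open-coded masks.
 */
static inline void mark_tsf_reset(unsigned long *op_flags)
{
	set_bit(OP_TSF_RESET, op_flags);	/* was: *op_flags |= BIT(6) */
}

static inline bool bt_scan_active(unsigned long *op_flags)
{
	return test_bit(OP_BT_SCAN, op_flags);	/* was: *op_flags & BIT(4) */
}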
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d7beefe60683..746856243bff 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -594,7 +594,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
priv->spec_priv.ah = priv->ah;
priv->spec_priv.spec_config.enabled = 0;
- priv->spec_priv.spec_config.short_repeat = false;
+ priv->spec_priv.spec_config.short_repeat = true;
priv->spec_priv.spec_config.count = 8;
priv->spec_priv.spec_config.endless = false;
priv->spec_priv.spec_config.period = 0x12;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 564923c0df87..b71f3072fd9a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1238,8 +1238,7 @@ out:
}
#define SUPPORTED_FILTERS \
- (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+ (FIF_ALLMULTI | \
FIF_CONTROL | \
FIF_PSPOLL | \
FIF_OTHER_BSS | \
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index a0f58e2aa553..cc9648f844ae 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -872,14 +872,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
if (priv->rxfilter & FIF_PROBE_REQ)
rfilt |= ATH9K_RX_FILTER_PROBEREQ;
- /*
- * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
- * mode interface or when in monitor mode. AP mode does not need this
- * since it receives all in-BSS frames anyway.
- */
- if (((ah->opmode != NL80211_IFTYPE_AP) &&
- (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
- ah->is_monitoring)
+ if (ah->is_monitoring)
rfilt |= ATH9K_RX_FILTER_PROM;
if (priv->rxfilter & FIF_CONTROL)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index c1d2d0340feb..e8454db17634 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1119,6 +1119,8 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah);
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
struct ath9k_channel *chan);
+void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan, int bin);
void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
struct ath9k_channel *chan, int ht40_delta);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b0badef71ce7..d285e3a89853 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1442,8 +1442,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
#define SUPPORTED_FILTERS \
- (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+ (FIF_ALLMULTI | \
FIF_CONTROL | \
FIF_PSPOLL | \
FIF_OTHER_BSS | \
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6fb40ef86fd6..6c75fb1ab77d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -392,11 +392,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
rfilt |= ATH9K_RX_FILTER_PROBEREQ;
- /*
- * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
- * mode interface or when in monitor mode. AP mode does not need this
- * since it receives all in-BSS frames anyway.
- */
if (sc->sc_ah->is_monitoring)
rfilt |= ATH9K_RX_FILTER_PROM;
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 47d5c2e910ad..020cd46471f5 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -310,8 +310,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_RX_FILTER)) {
ar->fw.rx_filter = true;
ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL |
- FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS |
- FIF_PROMISC_IN_BSS;
+ FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS;
}
if (SUPP(CARL9170FW_HW_COUNTERS))
diff --git a/drivers/net/wireless/ath/carl9170/led.c b/drivers/net/wireless/ath/carl9170/led.c
index 78dadc797558..2c74425f5059 100644
--- a/drivers/net/wireless/ath/carl9170/led.c
+++ b/drivers/net/wireless/ath/carl9170/led.c
@@ -122,7 +122,7 @@ static void carl9170_led_set_brightness(struct led_classdev *led,
}
static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
- char *trigger)
+ const char *trigger)
{
int err;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index f1455a04cb62..59db6732d4e3 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1011,9 +1011,8 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
if (multicast != ar->cur_mc_hash)
WARN_ON(carl9170_update_multicast(ar, multicast));
- if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
- ar->sniffer_enabled = !!(*new_flags &
- (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
+ if (changed_flags & FIF_OTHER_BSS) {
+ ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
WARN_ON(carl9170_set_operating_mode(ar));
}
@@ -1033,7 +1032,7 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
if (!(*new_flags & FIF_PSPOLL))
rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
- if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
+ if (!(*new_flags & FIF_OTHER_BSS)) {
rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index c9f93310c0d6..76842e6ca38e 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -651,6 +651,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
unsigned int plen, void *payload, unsigned int outlen, void *out)
{
int err = -ENOMEM;
+ unsigned long time_left;
if (!IS_ACCEPTING_CMD(ar))
return -EIO;
@@ -672,8 +673,8 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
err = __carl9170_exec_cmd(ar, &ar->cmd, false);
if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
- err = wait_for_completion_timeout(&ar->cmd_wait, HZ);
- if (err == 0) {
+ time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ);
+ if (time_left == 0) {
err = -ETIMEDOUT;
goto err_unbuf;
}
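The usb.c hunk above fixes a common type mix-up: wait_for_completion_timeout()
returns the remaining jiffies as an unsigned long, not an errno, so storing it
in the signed err variable conflates the two. A minimal sketch of the
corrected pattern:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Keep the unsigned jiffies count separate from the errno-style
 * return value, as the patch does.
 */
static int wait_cmd_done(struct completion *done)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done, HZ);
	if (time_left == 0)
		return -ETIMEDOUT;	/* timed out, no reply */

	return 0;			/* completed in time */
}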
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index c657ca26a71a..656ce42b339a 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -41,30 +41,31 @@ struct radar_types {
/* percentage on ppb threshold to trigger detection */
#define MIN_PPB_THRESH 50
-#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
+#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
+#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
/* percentage of pulse width tolerance */
#define WIDTH_TOLERANCE 5
#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
-#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
(PRF2PRI(PMAX) - PRI_TOLERANCE), \
(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
- PPB_THRESH(PPB), PRI_TOLERANCE, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
/* radar types as defined by ETSI EN-301-893 v1.5.1 */
static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
- ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18),
- ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10),
- ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15),
- ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25),
- ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
- ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10),
- ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15),
+ ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18, false),
+ ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10, false),
+ ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15, false),
+ ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25, false),
+ ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
+ ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10, false),
+ ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15, false),
};
static const struct radar_types etsi_radar_types_v15 = {
@@ -73,21 +74,30 @@ static const struct radar_types etsi_radar_types_v15 = {
.radar_types = etsi_radar_ref_types_v15,
};
-#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
PMIN - PRI_TOLERANCE, \
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
- PPB_THRESH(PPB), PRI_TOLERANCE, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
+/* FCC radar types released on August 14, 2014.
+ * Type 1 PRI values are randomly selected within the range of 518 and 3066.
+ * Dividing that range into 3 groups is good enough for both radar detection
+ * and avoiding false detections, based on practical test results
+ * collected for more than a year.
+ */
static const struct radar_detector_specs fcc_radar_ref_types[] = {
- FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
- FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
- FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
- FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
- FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
- FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+ FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18, false),
+ FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false),
+ FCC_PATTERN(102, 0, 1, 938, 2000, 1, 27, false),
+ FCC_PATTERN(103, 0, 1, 2000, 3066, 1, 18, false),
+ FCC_PATTERN(2, 0, 5, 150, 230, 1, 23, false),
+ FCC_PATTERN(3, 6, 10, 200, 500, 1, 16, false),
+ FCC_PATTERN(4, 11, 20, 200, 500, 1, 12, false),
+ FCC_PATTERN(5, 50, 100, 1000, 2000, 1, 1, true),
+ FCC_PATTERN(6, 0, 1, 333, 333, 1, 9, false),
};
static const struct radar_types fcc_radar_types = {
@@ -96,17 +106,23 @@ static const struct radar_types fcc_radar_types = {
.radar_types = fcc_radar_ref_types,
};
-#define JP_PATTERN FCC_PATTERN
+#define JP_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, RATE, CHIRP) \
+{ \
+ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
+ PMIN - PRI_TOLERANCE, \
+ PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
+ PPB_THRESH_RATE(PPB, RATE), PRI_TOLERANCE, CHIRP \
+}
static const struct radar_detector_specs jp_radar_ref_types[] = {
- JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
- JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
- JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
- JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
- JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
- JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
- JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
- JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
- JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+ JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
+ JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
+ JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
+ JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+ JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
+ JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
+ JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
+ JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false),
+ JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
};
static const struct radar_types jp_radar_types = {
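PPB_THRESH_RATE(PPB, RATE) evaluates to floor((PPB - 1) * RATE / 100) + 1,
so the detection threshold scales with the per-pattern rate but is always at
least one pulse. Two worked values from the tables above:

#include <stdio.h>

#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)

int main(void)
{
	/* JP type 0: 18 pulses per burst at a 29% rate:
	 * (18 * 29 + 71) / 100 = 593 / 100 = 5 pulses required.
	 */
	printf("%d\n", PPB_THRESH_RATE(18, 29));

	/* At the default 50% rate the old PPB_THRESH() value is kept:
	 * (18 * 50 + 50) / 100 = 950 / 100 = 9 pulses required.
	 */
	printf("%d\n", PPB_THRESH_RATE(18, 50));
	return 0;
}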
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
index dde2652b787c..25a43d632f90 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -40,12 +40,14 @@ struct ath_dfs_pool_stats {
* @freq: channel frequency in MHz
* @width: pulse duration in us
* @rssi: rssi of radar event
+ * @chirp: chirp detected in pulse
*/
struct pulse_event {
u64 ts;
u16 freq;
u8 width;
u8 rssi;
+ bool chirp;
};
/**
@@ -59,6 +61,7 @@ struct pulse_event {
* @ppb: pulses per bursts for this type
* @ppb_thresh: number of pulses required to trigger detection
* @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
+ * @chirp: chirp required for the radar pattern
*/
struct radar_detector_specs {
u8 type_id;
@@ -70,6 +73,7 @@ struct radar_detector_specs {
u8 ppb;
u8 ppb_thresh;
u8 max_pri_tolerance;
+ bool chirp;
};
/**
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 43b608178884..1b5ad1965607 100644
--- a/drivers/net/wireless/ath/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -390,6 +390,10 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
if ((ts - de->last_ts) < rs->max_pri_tolerance)
/* if delta to last pulse is too short, don't use this pulse */
return NULL;
+ /* if the spec requires a chirp, drop pulses where none was detected */
+ if (rs->chirp && rs->chirp != event->chirp)
+ return NULL;
+
de->last_ts = ts;
max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
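With the chirp flag threaded through, a pulse is now rejected early when a
detector's spec requires a chirp the event doesn't carry. A reduced model of
that gate (the helper name is illustrative; the patch checks this inline):

#include "dfs_pattern_detector.h"

/* A spec that requires a chirp rejects non-chirped pulses; specs
 * without the requirement accept either kind.
 */
static bool pulse_matches_chirp(const struct radar_detector_specs *rs,
				const struct pulse_event *event)
{
	if (rs->chirp && rs->chirp != event->chirp)
		return false;

	return true;
}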
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index caa717bf52f3..050506f842e9 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -12,6 +12,7 @@ wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += ioctl.o
wil6210-y += fw.o
+wil6210-y += pmc.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
wil6210-y += ethtool.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index b97172667bc7..dbfcdd16628a 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -402,11 +402,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
rsn_eid = sme->ie ?
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
NULL;
-
- if (sme->privacy && !rsn_eid) {
- wil_err(wil, "Missing RSN IE for secure connection\n");
- return -EINVAL;
- }
+ if (sme->privacy && !rsn_eid)
+ wil_info(wil, "WSC connection\n");
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
@@ -425,10 +422,17 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
wil->privacy = sme->privacy;
if (wil->privacy) {
- /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
- rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+ /* For secure assoc, remove old keys */
+ rc = wmi_del_cipher_key(wil, 0, bss->bssid,
+ WMI_KEY_USE_PAIRWISE);
if (rc) {
- wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
+ goto out;
+ }
+ rc = wmi_del_cipher_key(wil, 0, bss->bssid,
+ WMI_KEY_USE_RX_GROUP);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
goto out;
}
}
@@ -458,11 +462,18 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
goto out;
}
if (wil->privacy) {
- conn.dot11_auth_mode = WMI_AUTH11_SHARED;
- conn.auth_mode = WMI_AUTH_WPA2_PSK;
- conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
- conn.pairwise_crypto_len = 16;
- } else {
+ if (rsn_eid) { /* regular secure connection */
+ conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+ conn.auth_mode = WMI_AUTH_WPA2_PSK;
+ conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.pairwise_crypto_len = 16;
+ conn.group_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.group_crypto_len = 16;
+ } else { /* WSC */
+ conn.dot11_auth_mode = WMI_AUTH11_WSC;
+ conn.auth_mode = WMI_AUTH_NONE;
+ }
+ } else { /* insecure connection */
conn.dot11_auth_mode = WMI_AUTH11_OPEN;
conn.auth_mode = WMI_AUTH_NONE;
}
@@ -507,6 +518,8 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
+
rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
return rc;
@@ -561,6 +574,39 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
return 0;
}
+static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
+ bool pairwise)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ enum wmi_key_usage rc;
+ static const char * const key_usage_str[] = {
+ [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE",
+ [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP",
+ [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP",
+ };
+
+ if (pairwise) {
+ rc = WMI_KEY_USE_PAIRWISE;
+ } else {
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ rc = WMI_KEY_USE_RX_GROUP;
+ break;
+ case NL80211_IFTYPE_AP:
+ rc = WMI_KEY_USE_TX_GROUP;
+ break;
+ default:
+ /* TODO: Rx GTK or Tx GTK? */
+ wil_err(wil, "Can't determine GTK type\n");
+ rc = WMI_KEY_USE_RX_GROUP;
+ break;
+ }
+ }
+ wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
+
+ return rc;
+}
+
static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool pairwise,
@@ -568,13 +614,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct key_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- /* group key is not used */
- if (!pairwise)
- return 0;
+ wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
+ pairwise ? "PTK" : "GTK");
- return wmi_add_cipher_key(wil, key_index, mac_addr,
- params->key_len, params->key);
+ return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
+ params->key, key_usage);
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
@@ -583,12 +629,12 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
const u8 *mac_addr)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- /* group key is not used */
- if (!pairwise)
- return 0;
+ wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
+ pairwise ? "PTK" : "GTK");
- return wmi_del_cipher_key(wil, key_index, mac_addr);
+ return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
/* Need to be present or wiphy_new() will WARN */
@@ -661,11 +707,6 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
if (bcon->probe_resp_len <= hlen)
return 0;
- if (!bcon->proberesp_ies) {
- bcon->proberesp_ies = f->u.probe_resp.variable;
- bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
- rc = 1;
- }
if (!bcon->assocresp_ies) {
bcon->assocresp_ies = f->u.probe_resp.variable;
bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
@@ -680,9 +721,19 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+ size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ const u8 *pr_ies = NULL;
+ size_t pr_ies_len = 0;
int rc;
wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_print_bcon_data(bcon);
+
+ if (bcon->probe_resp_len > hlen) {
+ pr_ies = f->u.probe_resp.variable;
+ pr_ies_len = bcon->probe_resp_len - hlen;
+ }
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
@@ -695,9 +746,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
- rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
- bcon->proberesp_ies_len,
- bcon->proberesp_ies);
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
if (rc) {
wil_err(wil, "set_ie(PROBE_RESP) failed\n");
return rc;
@@ -725,6 +774,10 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+ struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+ size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ const u8 *pr_ies = NULL;
+ size_t pr_ies_len = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
@@ -744,6 +797,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
+ if (bcon->probe_resp_len > hlen) {
+ pr_ies = f->u.probe_resp.variable;
+ pr_ies_len = bcon->probe_resp_len - hlen;
+ }
+
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
@@ -771,8 +829,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
- wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
- bcon->proberesp_ies);
+ wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
bcon->assocresp_ies);
@@ -814,13 +871,9 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
wmi_pcp_stop(wil);
__wil_down(wil);
- __wil_up(wil);
mutex_unlock(&wil->mutex);
- /* some functions above might fail (e.g. __wil_up). Nevertheless, we
- * return success because AP has stopped
- */
return 0;
}
@@ -830,6 +883,9 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
+ params->reason_code);
+
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, params->mac, params->reason_code, false);
mutex_unlock(&wil->mutex);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index bbc22d88f78f..8f9c0722a801 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -24,6 +24,7 @@
#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
+#include "pmc.h"
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
@@ -123,15 +124,17 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
if (cid < WIL6210_MAX_CID)
seq_printf(s,
- "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
+ "\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
+ txdata->dot1x_open ? "+" : "-",
txdata->agg_wsize,
txdata->agg_timeout,
txdata->agg_amsdu ? "+" : "-",
used, avail, sidle);
else
seq_printf(s,
- "\nBroadcast [%3d|%3d] idle %s\n",
+ "\nBroadcast 1x%s [%3d|%3d] idle %s\n",
+ txdata->dot1x_open ? "+" : "-",
used, avail, sidle);
wil_print_vring(s, wil, name, vring, '_', 'H');
@@ -702,6 +705,89 @@ static const struct file_operations fops_back = {
.open = simple_open,
};
+/* pmc control, write:
+ * - "alloc <num descriptors> <descriptor_size>" to allocate PMC
+ * - "free" to release memory allocated for PMC
+ */
+static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int rc;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+ char cmd[9];
+ int num_descs, desc_size;
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size);
+ kfree(kbuf);
+
+ if (rc < 0)
+ return rc;
+
+ if (rc < 1) {
+ wil_err(wil, "pmccfg: no params given\n");
+ return -EINVAL;
+ }
+
+ if (0 == strcmp(cmd, "alloc")) {
+ if (rc != 3) {
+ wil_err(wil, "pmccfg: alloc requires 2 params\n");
+ return -EINVAL;
+ }
+ wil_pmc_alloc(wil, num_descs, desc_size);
+ } else if (0 == strcmp(cmd, "free")) {
+ if (rc != 1) {
+ wil_err(wil, "pmccfg: free does not have any params\n");
+ return -EINVAL;
+ }
+ wil_pmc_free(wil, true);
+ } else {
+ wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ char text[256];
+ char help[] = "pmc control, write:\n"
+ " - \"alloc <num descriptors> <descriptor_size>\" to allocate pmc\n"
+ " - \"free\" to free memory allocated for pmc\n";
+
+ sprintf(text, "Last command status: %d\n\n%s",
+ wil_pmc_last_cmd_status(wil),
+ help);
+
+ return simple_read_from_buffer(user_buf, count, ppos, text,
+ strlen(text) + 1);
+}
+
+static const struct file_operations fops_pmccfg = {
+ .read = wil_read_pmccfg,
+ .write = wil_write_pmccfg,
+ .open = simple_open,
+};
+
+static const struct file_operations fops_pmcdata = {
+ .open = simple_open,
+ .read = wil_pmc_read,
+ .llseek = wil_pmc_llseek,
+};
+
/*---tx_mgmt---*/
/* Write mgmt frame to this file to send it */
static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
@@ -1111,8 +1197,7 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data)
status = "connected";
break;
}
- seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
- (p->data_port_open ? " data_port_open" : ""));
+ seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
if (p->status == wil_sta_connected) {
rc = wil_cid_fill_sinfo(wil, i, &sinfo);
@@ -1292,8 +1377,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
status = "connected";
break;
}
- seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
- (p->data_port_open ? " data_port_open" : ""));
+ seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
if (p->status == wil_sta_connected) {
spin_lock_bh(&p->tid_rx_lock);
@@ -1363,6 +1447,8 @@ static const struct {
{"tx_mgmt", S_IWUSR, &fops_txmgmt},
{"wmi_send", S_IWUSR, &fops_wmi},
{"back", S_IRUGO | S_IWUSR, &fops_back},
+ {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg},
+ {"pmcdata", S_IRUGO, &fops_pmcdata},
{"temp", S_IRUGO, &fops_temp},
{"freq", S_IRUGO, &fops_freq},
{"link", S_IRUGO, &fops_link},
@@ -1440,6 +1526,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
if (IS_ERR_OR_NULL(dbg))
return -ENODEV;
+ wil_pmc_init(wil);
+
wil6210_debugfs_init_files(wil, dbg);
wil6210_debugfs_init_isr(wil, dbg);
wil6210_debugfs_init_blobs(wil, dbg);
@@ -1459,4 +1547,9 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
+
+ /* free pmc memory without sending a command to the fw, as it
+ * will be reset on the way down anyway
+ */
+ wil_pmc_free(wil, false);
}
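wil_write_pmccfg() accepts exactly two commands: "alloc" with two integer
parameters, and "free" with none. A hedged userspace sketch driving the
allocate path (the debugfs path is an assumption; it depends on where
debugfs is mounted and on the wiphy name):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Userspace sketch: request a PMC allocation through debugfs. */
int main(void)
{
	const char *path = "/sys/kernel/debug/ieee80211/phy0/wil6210/pmccfg";
	const char *cmd = "alloc 256 4096\n";	/* 256 descriptors x 4 KiB */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}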
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index c2a238426425..6d704aee3afd 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -25,6 +25,10 @@
#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
+bool debug_fw; /* = false; */
+module_param(debug_fw, bool, S_IRUGO);
+MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
+
bool no_fw_recovery;
module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -146,7 +150,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
sta->status);
- sta->data_port_open = false;
if (sta->status != wil_sta_unused) {
if (!from_event)
wmi_disconnect_sta(wil, sta->addr, reason_code);
@@ -224,7 +227,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
if (test_bit(wil_status_fwconnected, wil->status)) {
clear_bit(wil_status_fwconnected, wil->status);
cfg80211_disconnected(ndev, reason_code,
- NULL, 0, GFP_KERNEL);
+ NULL, 0, false, GFP_KERNEL);
} else if (test_bit(wil_status_fwconnecting, wil->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
@@ -373,9 +376,10 @@ int wil_bcast_init(struct wil6210_priv *wil)
if (ri < 0)
return ri;
+ wil->bcast_vring = ri;
rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
- if (rc == 0)
- wil->bcast_vring = ri;
+ if (rc)
+ wil->bcast_vring = -1;
return rc;
}
@@ -547,7 +551,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
- u32 x;
+ u32 x, x1 = 0;
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
@@ -602,12 +606,16 @@ static int wil_target_reset(struct wil6210_priv *wil)
do {
msleep(RST_DELAY);
x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+ if (x1 != x) {
+ wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
+ x1 = x;
+ }
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
x);
return -ETIME;
}
- } while (!(x & BIT_BL_READY));
+ } while (x != BIT_BL_READY);
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -686,6 +694,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
+ if (debug_fw) {
+ static const u8 mac[ETH_ALEN] = {
+ 0x00, 0xde, 0xad, 0x12, 0x34, 0x56,
+ };
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ ether_addr_copy(ndev->perm_addr, mac);
+ ether_addr_copy(ndev->dev_addr, ndev->perm_addr);
+ return 0;
+ }
+
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index f2f7ea29558e..6042f61b016c 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -24,6 +24,11 @@ static int wil_open(struct net_device *ndev)
wil_dbg_misc(wil, "%s()\n", __func__);
+ if (debug_fw) {
+ wil_err(wil, "%s() while in debug_fw mode\n", __func__);
+ return -EINVAL;
+ }
+
return wil_up(wil);
}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 109986114abf..58c79166a6d1 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -27,10 +27,6 @@ MODULE_PARM_DESC(use_msi,
" Use MSI interrupt: "
"0 - don't, 1 - (default) - single, or 3");
-static bool debug_fw; /* = false; */
-module_param(debug_fw, bool, S_IRUGO);
-MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
-
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
@@ -133,8 +129,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
mutex_lock(&wil->mutex);
rc = wil_reset(wil, false);
mutex_unlock(&wil->mutex);
- if (debug_fw)
- rc = 0;
if (rc)
goto release_irq;
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
new file mode 100644
index 000000000000..8a8cdc61b25b
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include "wmi.h"
+#include "wil6210.h"
+#include "txrx.h"
+#include "pmc.h"
+
+struct desc_alloc_info {
+ dma_addr_t pa;
+ void *va;
+};
+
+static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
+{
+ return !!pmc->pring_va;
+}
+
+void wil_pmc_init(struct wil6210_priv *wil)
+{
+ memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
+ mutex_init(&wil->pmc.lock);
+}
+
+/**
+ * Allocate the physical ring (p-ring) and the required
+ * number of descriptors of the required size.
+ * Initialize the descriptors as required by pmc dma.
+ * Each dword of a descriptor's buffer is initialized to hold
+ * its serial number in the LSW and the reserved value
+ * PCM_DATA_INVALID_DW_VAL in the MSW.
+ */
+void wil_pmc_alloc(struct wil6210_priv *wil,
+ int num_descriptors,
+ int descriptor_size)
+{
+ u32 i;
+ struct pmc_ctx *pmc = &wil->pmc;
+ struct device *dev = wil_to_dev(wil);
+ struct wmi_pmc_cmd pmc_cmd = {0};
+
+ mutex_lock(&pmc->lock);
+
+ if (wil_is_pmc_allocated(pmc)) {
+ /* sanity check */
+ wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
+ goto no_release_err;
+ }
+
+ pmc->num_descriptors = num_descriptors;
+ pmc->descriptor_size = descriptor_size;
+
+ wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
+ __func__, num_descriptors, descriptor_size);
+
+ /* allocate descriptors info list in pmc context */
+ pmc->descriptors = kcalloc(num_descriptors,
+ sizeof(struct desc_alloc_info),
+ GFP_KERNEL);
+ if (!pmc->descriptors) {
+ wil_err(wil, "%s: ERROR allocating pmc descriptors list\n", __func__);
+ goto no_release_err;
+ }
+
+ wil_dbg_misc(wil,
+ "%s: allocated descriptors info list %p\n",
+ __func__, pmc->descriptors);
+
+ /* Allocate pring buffer and descriptors.
+ * vring->va should be aligned on its size rounded up to a power of 2.
+ * This is guaranteed by dma_alloc_coherent.
+ */
+ pmc->pring_va = dma_alloc_coherent(dev,
+ sizeof(struct vring_tx_desc) * num_descriptors,
+ &pmc->pring_pa,
+ GFP_KERNEL);
+
+ wil_dbg_misc(wil,
+ "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
+ __func__,
+ pmc->pring_va, &pmc->pring_pa,
+ sizeof(struct vring_tx_desc),
+ num_descriptors,
+ sizeof(struct vring_tx_desc) * num_descriptors);
+
+ if (!pmc->pring_va) {
+ wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
+ goto release_pmc_skb_list;
+ }
+
+ /* initially, all descriptors are SW owned.
+ * For Tx, Rx, and PMC the ownership bit is at the same
+ * location, thus any of the descriptor layouts can be used
+ */
+ for (i = 0; i < num_descriptors; i++) {
+ struct vring_tx_desc *_d = &pmc->pring_va[i];
+ struct vring_tx_desc dd, *d = &dd;
+ int j = 0;
+
+ pmc->descriptors[i].va = dma_alloc_coherent(dev,
+ descriptor_size,
+ &pmc->descriptors[i].pa,
+ GFP_KERNEL);
+
+ if (unlikely(!pmc->descriptors[i].va)) {
+ wil_err(wil,
+ "%s: ERROR allocating pmc descriptor %d",
+ __func__, i);
+ goto release_pmc_skbs;
+ }
+
+ for (j = 0; j < descriptor_size / sizeof(u32); j++) {
+ u32 *p = (u32 *)pmc->descriptors[i].va + j;
+ *p = PCM_DATA_INVALID_DW_VAL | j;
+ }
+
+ /* configure dma descriptor */
+ d->dma.addr.addr_low =
+ cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
+ d->dma.addr.addr_high =
+ cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
+ d->dma.status = 0; /* 0 = HW_OWNED */
+ d->dma.length = cpu_to_le16(descriptor_size);
+ d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
+ *_d = *d;
+ }
+
+ wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
+
+ pmc_cmd.op = WMI_PMC_ALLOCATE;
+ pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
+ pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
+
+ wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
+ pmc->last_cmd_status = wmi_send(wil,
+ WMI_PMC_CMDID,
+ &pmc_cmd,
+ sizeof(pmc_cmd));
+ if (pmc->last_cmd_status) {
+ wil_err(wil,
+ "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
+ __func__, pmc->last_cmd_status);
+ goto release_pmc_skbs;
+ }
+
+ mutex_unlock(&pmc->lock);
+
+ return;
+
+release_pmc_skbs:
+ wil_err(wil, "%s: exit on error: Releasing descriptors...\n", __func__);
+ for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
+ dma_free_coherent(dev,
+ descriptor_size,
+ pmc->descriptors[i].va,
+ pmc->descriptors[i].pa);
+
+ pmc->descriptors[i].va = NULL;
+ }
+ wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
+
+ dma_free_coherent(dev,
+ sizeof(struct vring_tx_desc) * num_descriptors,
+ pmc->pring_va,
+ pmc->pring_pa);
+
+ pmc->pring_va = NULL;
+
+release_pmc_skb_list:
+ wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
+ __func__);
+ kfree(pmc->descriptors);
+ pmc->descriptors = NULL;
+
+no_release_err:
+ pmc->last_cmd_status = -ENOMEM;
+ mutex_unlock(&pmc->lock);
+}
+
+/**
+ * Traverse the p-ring and release all buffers.
+ * At the end release the p-ring memory
+ */
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
+{
+ struct pmc_ctx *pmc = &wil->pmc;
+ struct device *dev = wil_to_dev(wil);
+ struct wmi_pmc_cmd pmc_cmd = {0};
+
+ mutex_lock(&pmc->lock);
+
+ pmc->last_cmd_status = 0;
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
+ __func__);
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return;
+ }
+
+ if (send_pmc_cmd) {
+ wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
+ __func__);
+ pmc_cmd.op = WMI_PMC_RELEASE;
+ pmc->last_cmd_status =
+ wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
+ sizeof(pmc_cmd));
+ if (pmc->last_cmd_status) {
+ wil_err(wil,
+ "%s WMI_PMC_CMD with RELEASE op failed, status %d",
+ __func__, pmc->last_cmd_status);
+ /* There's nothing we can do with this error.
+ * Normally, it should never occur.
+ * Continue freeing all memory allocated for pmc.
+ */
+ }
+ }
+
+ if (pmc->pring_va) {
+ size_t buf_size = sizeof(struct vring_tx_desc) *
+ pmc->num_descriptors;
+
+ wil_dbg_misc(wil, "%s: free pring va %p\n",
+ __func__, pmc->pring_va);
+ dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
+
+ pmc->pring_va = NULL;
+ } else {
+ pmc->last_cmd_status = -ENOENT;
+ }
+
+ if (pmc->descriptors) {
+ int i;
+
+ for (i = 0;
+ i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
+ dma_free_coherent(dev,
+ pmc->descriptor_size,
+ pmc->descriptors[i].va,
+ pmc->descriptors[i].pa);
+ pmc->descriptors[i].va = NULL;
+ }
+ wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
+ __func__, i, pmc->num_descriptors);
+ wil_dbg_misc(wil,
+ "%s: free pmc descriptors info list %p\n",
+ __func__, pmc->descriptors);
+ kfree(pmc->descriptors);
+ pmc->descriptors = NULL;
+ } else {
+ pmc->last_cmd_status = -ENOENT;
+ }
+
+ mutex_unlock(&pmc->lock);
+}
+
+/**
+ * Status of the last operation requested via debugfs: alloc/free/read.
+ * 0 - success or negative errno
+ */
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "%s: status %d\n", __func__,
+ wil->pmc.last_cmd_status);
+
+ return wil->pmc.last_cmd_status;
+}
+
+/**
+ * Read from the requested position up to the end of the current
+ * descriptor, whose size was configured during the alloc request.
+ */
+ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct wil6210_priv *wil = filp->private_data;
+ struct pmc_ctx *pmc = &wil->pmc;
+ ssize_t retval = 0;
+ unsigned long long idx;
+ loff_t offset;
+ size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+ mutex_lock(&pmc->lock);
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return -EPERM;
+ }
+
+ wil_dbg_misc(wil,
+ "%s: size %u, pos %lld\n",
+ __func__, (unsigned)count, *f_pos);
+
+ pmc->last_cmd_status = 0;
+
+ idx = *f_pos;
+ do_div(idx, pmc->descriptor_size);
+ offset = *f_pos - (idx * pmc->descriptor_size);
+
+ if (*f_pos >= pmc_size) {
+ wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
+ __func__, *f_pos, (unsigned)pmc_size);
+ pmc->last_cmd_status = -ERANGE;
+ goto out;
+ }
+
+ wil_dbg_misc(wil,
+ "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+ __func__, *f_pos, idx, offset, count);
+
+ /* if no errors, return the copied byte count */
+ retval = simple_read_from_buffer(buf,
+ count,
+ &offset,
+ pmc->descriptors[idx].va,
+ pmc->descriptor_size);
+ *f_pos += retval;
+out:
+ mutex_unlock(&pmc->lock);
+
+ return retval;
+}
+
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
+{
+ loff_t newpos;
+ struct wil6210_priv *wil = filp->private_data;
+ struct pmc_ctx *pmc = &wil->pmc;
+ size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+ switch (whence) {
+ case SEEK_SET:
+ newpos = off;
+ break;
+
+ case SEEK_CUR:
+ newpos = filp->f_pos + off;
+ break;
+
+ case SEEK_END:
+ newpos = pmc_size;
+ break;
+
+ default: /* can't happen */
+ return -EINVAL;
+ }
+
+ if (newpos < 0)
+ return -EINVAL;
+ if (newpos > pmc_size)
+ newpos = pmc_size;
+
+ filp->f_pos = newpos;
+
+ return newpos;
+}
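Since pmcdata implements both read and llseek, ordinary file I/O works
against it, one descriptor at a time. A hedged userspace sketch (the
debugfs path and the 4096-byte descriptor size are assumptions; they depend
on the mount point and on the "alloc" previously written to pmccfg):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Userspace sketch: dump one PMC descriptor's payload. */
int main(void)
{
	const char *path = "/sys/kernel/debug/ieee80211/phy0/wil6210/pmcdata";
	unsigned char buf[4096];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Seek to the start of descriptor 1, assuming 4096-byte
	 * descriptors ("alloc <n> 4096" written to pmccfg earlier).
	 */
	if (lseek(fd, 4096, SEEK_SET) < 0) {
		perror("lseek");
		close(fd);
		return EXIT_FAILURE;
	}

	n = read(fd, buf, sizeof(buf));	/* reads up to the descriptor end */
	printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}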
diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h
new file mode 100644
index 000000000000..bebc8d52e1e6
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pmc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+
+#define PCM_DATA_INVALID_DW_VAL (0xB0BA0000)
+
+void wil_pmc_init(struct wil6210_priv *wil);
+void wil_pmc_alloc(struct wil6210_priv *wil,
+ int num_descriptors, int descriptor_size);
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd);
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil);
+ssize_t wil_pmc_read(struct file *, char __user *, size_t, loff_t *);
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index e8bd512d81a9..0113dac3a9a9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -236,7 +236,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
return -ENOMEM;
}
- d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+ d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
wil_desc_addr_set(&d->dma.addr, pa);
/* ip_length don't care */
/* b11 don't care */
@@ -724,6 +724,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ if (!wil->privacy)
+ txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
if (rc)
@@ -738,11 +740,13 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
- if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
+ if (txdata->dot1x_open && (agg_wsize >= 0))
wil_addba_tx_request(wil, id, agg_wsize);
return 0;
out_free:
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
wil_vring_free(wil, vring, 1);
out:
@@ -792,6 +796,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ if (!wil->privacy)
+ txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
if (rc)
@@ -809,6 +815,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
return 0;
out_free:
+ txdata->enabled = 0;
+ txdata->dot1x_open = false;
wil_vring_free(wil, vring, 1);
out:
@@ -828,6 +836,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
/* make sure NAPI won't touch this vring */
@@ -848,12 +857,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
if (cid < 0)
return NULL;
- if (!wil->sta[cid].data_port_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
- return NULL;
-
/* TODO: fix for multiple TID */
for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+ if (!wil->vring_tx_data[i].dot1x_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ continue;
if (wil->vring2cid_tid[i][0] == cid) {
struct vring *v = &wil->vring_tx[i];
@@ -883,7 +891,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
/* In the STA mode, it is expected to have only 1 VRING
* for the AP we connected to.
- * find 1-st vring and see whether it is eligible for data
+ * find the first vring eligible for this skb and use it.
*/
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
v = &wil->vring_tx[i];
@@ -894,9 +902,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->sta[cid].data_port_open &&
+ if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
- break;
+ continue;
wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
@@ -918,7 +926,6 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
* in all cases override dest address to unicast peer's address
* Use old strategy when new is not supported yet:
* - for PBSS
- * - for secure link
*/
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
struct sk_buff *skb)
@@ -931,6 +938,9 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
v = &wil->vring_tx[i];
if (!v->va)
return NULL;
+ if (!wil->vring_tx_data[i].dot1x_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ return NULL;
return v;
}
@@ -963,7 +973,8 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->sta[cid].data_port_open)
+ if (!wil->vring_tx_data[i].dot1x_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
/* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -989,7 +1000,8 @@ found:
cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->sta[cid].data_port_open)
+ if (!wil->vring_tx_data[i].dot1x_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1016,9 +1028,6 @@ static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
if (wdev->iftype != NL80211_IFTYPE_AP)
return wil_find_tx_bcast_2(wil, skb);
- if (wil->privacy)
- return wil_find_tx_bcast_2(wil, skb);
-
return wil_find_tx_bcast_1(wil, skb);
}
@@ -1144,13 +1153,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_tx_desc_map(d, pa, len, vring_index);
if (unlikely(mcast)) {
d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
- if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
- /* set MCS 1 */
+ if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
- /* packet mode 2 */
- d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
- (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
- }
}
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
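The dot1x_open test recurs in every Tx-ring selection path above: until the
802.1x handshake opens a ring, only EAPOL frames may use it. A hypothetical
helper capturing the repeated check (the patch open-codes it inline instead):

#include <linux/etherdevice.h>
#include "wil6210.h"

/* A ring may carry a frame if 802.1x has opened it, or if the frame
 * is itself an EAPOL frame of the handshake.
 */
static bool wil_tx_ring_allowed(struct wil6210_priv *wil, int ring,
				struct sk_buff *skb)
{
	return wil->vring_tx_data[ring].dot1x_open ||
	       skb->protocol == cpu_to_be16(ETH_P_PAE);
}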
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index d90c8aa20c15..0c4638487c74 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -384,19 +384,27 @@ struct vring_rx_mac {
* [word 7] length
*/
-#define RX_DMA_D0_CMD_DMA_IT BIT(10)
-
-/* Error field, offload bits */
-#define RX_DMA_ERROR_L3_ERR BIT(4)
-#define RX_DMA_ERROR_L4_ERR BIT(5)
+#define RX_DMA_D0_CMD_DMA_EOP BIT(8)
+#define RX_DMA_D0_CMD_DMA_RT BIT(9) /* always 1 */
+#define RX_DMA_D0_CMD_DMA_IT BIT(10) /* interrupt */
+
+/* Error field */
+#define RX_DMA_ERROR_FCS BIT(0)
+#define RX_DMA_ERROR_MIC BIT(1)
+#define RX_DMA_ERROR_KEY BIT(2) /* Key missing */
+#define RX_DMA_ERROR_REPLAY BIT(3)
+#define RX_DMA_ERROR_L3_ERR BIT(4)
+#define RX_DMA_ERROR_L4_ERR BIT(5)
/* Status field */
-#define RX_DMA_STATUS_DU BIT(0)
-#define RX_DMA_STATUS_ERROR BIT(2)
-
+#define RX_DMA_STATUS_DU BIT(0)
+#define RX_DMA_STATUS_EOP BIT(1)
+#define RX_DMA_STATUS_ERROR BIT(2)
+#define RX_DMA_STATUS_MI BIT(3) /* MAC Interrupt is asserted */
#define RX_DMA_STATUS_L3I BIT(4)
#define RX_DMA_STATUS_L4I BIT(5)
#define RX_DMA_STATUS_PHY_INFO BIT(6)
+#define RX_DMA_STATUS_FFM BIT(7) /* EtherType Flex Filter Match */
struct vring_rx_dma {
u32 d0;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 4310972c9e16..f3513a1fa424 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -21,6 +21,7 @@
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <linux/timex.h>
+#include <linux/types.h>
#include "wil_platform.h"
extern bool no_fw_recovery;
@@ -29,10 +30,11 @@ extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize;
extern u32 vring_idle_trsh;
extern bool rx_align_2;
+extern bool debug_fw;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME "wil6210.fw" /* code */
-#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
+#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
@@ -396,6 +398,7 @@ struct vring {
* Additional data for Tx Vring
*/
struct vring_tx_data {
+ bool dot1x_open;
int enabled;
cycles_t idle, last_idle, begin;
u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
@@ -484,7 +487,6 @@ struct wil_sta_info {
u8 addr[ETH_ALEN];
enum wil_sta_status status;
struct wil_net_stats stats;
- bool data_port_open; /* can send any data, not only EAPOL */
/* Rx BACK */
struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
spinlock_t tid_rx_lock; /* guarding tid_rx array */
@@ -526,6 +528,17 @@ struct wil_probe_client_req {
u8 cid;
};
+struct pmc_ctx {
+ /* alloc, free, and read operations must own the lock */
+ struct mutex lock;
+ struct vring_tx_desc *pring_va;
+ dma_addr_t pring_pa;
+ struct desc_alloc_info *descriptors;
+ int last_cmd_status;
+ int num_descriptors;
+ int descriptor_size;
+};
+
struct wil6210_priv {
struct pci_dev *pdev;
int n_msi;
@@ -610,6 +623,8 @@ struct wil6210_priv {
void *platform_handle;
struct wil_platform_ops platform_ops;
+
+ struct pmc_ctx pmc;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -701,9 +716,10 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
int wmi_set_channel(struct wil6210_priv *wil, int channel);
int wmi_get_channel(struct wil6210_priv *wil, int *channel);
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
- const void *mac_addr);
+ const void *mac_addr, int key_usage);
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
- const void *mac_addr, int key_len, const void *key);
+ const void *mac_addr, int key_len, const void *key,
+ int key_usage);
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 9fe2085be2c5..3dc8daf69bd2 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -543,55 +543,22 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
}
}
-static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize)
+static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
{
- struct vring_tx_data *t;
- int i;
+ struct wmi_vring_en_event *evt = d;
+ u8 vri = evt->vring_index;
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- if (cid != wil->vring2cid_tid[i][0])
- continue;
- t = &wil->vring_tx_data[i];
- if (!t->enabled)
- continue;
+ wil_dbg_wmi(wil, "Enable vring %d\n", vri);
- wil_addba_tx_request(wil, i, wsize);
- }
-}
-
-static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
-{
- struct wmi_data_port_open_event *evt = d;
- u8 cid = evt->cid;
-
- wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
-
- if (cid >= ARRAY_SIZE(wil->sta)) {
- wil_err(wil, "Link UP for invalid CID %d\n", cid);
+ if (vri >= ARRAY_SIZE(wil->vring_tx)) {
+ wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
-
- wil->sta[cid].data_port_open = true;
- if (agg_wsize >= 0)
- wil_addba_tx_cid(wil, cid, agg_wsize);
-}
-
-static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
-{
- struct net_device *ndev = wil_to_ndev(wil);
- struct wmi_wbe_link_down_event *evt = d;
- u8 cid = evt->cid;
-
- wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
- cid, le32_to_cpu(evt->reason));
-
- if (cid >= ARRAY_SIZE(wil->sta)) {
- wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
+ wil->vring_tx_data[vri].dot1x_open = true;
+ if (vri == wil->bcast_vring) /* no BA for bcast */
return;
- }
-
- wil->sta[cid].data_port_open = false;
- netif_carrier_off(ndev);
+ if (agg_wsize >= 0)
+ wil_addba_tx_request(wil, vri, agg_wsize);
}
static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
@@ -695,11 +662,10 @@ static const struct {
{WMI_CONNECT_EVENTID, wmi_evt_connect},
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
{WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
- {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
- {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
+ {WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
};
/*
@@ -844,7 +810,7 @@ int wmi_echo(struct wil6210_priv *wil)
};
return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
- WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+ WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
}
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
@@ -985,7 +951,7 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
}
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
- const void *mac_addr)
+ const void *mac_addr, int key_usage)
{
struct wmi_delete_cipher_key_cmd cmd = {
.key_index = key_index,
@@ -998,11 +964,12 @@ int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
}
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
- const void *mac_addr, int key_len, const void *key)
+ const void *mac_addr, int key_len, const void *key,
+ int key_usage)
{
struct wmi_add_cipher_key_cmd cmd = {
.key_index = key_index,
- .key_usage = WMI_KEY_USE_PAIRWISE,
+ .key_usage = key_usage,
.key_len = key_len,
};
@@ -1238,7 +1205,8 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
- WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100);
+ WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
+ 100);
if (rc)
return rc;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index b29055315350..cc04ab73b398 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -253,8 +253,8 @@ struct wmi_set_passphrase_cmd {
*/
enum wmi_key_usage {
WMI_KEY_USE_PAIRWISE = 0,
- WMI_KEY_USE_GROUP = 1,
- WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */
+ WMI_KEY_USE_RX_GROUP = 1,
+ WMI_KEY_USE_TX_GROUP = 2,
};
struct wmi_add_cipher_key_cmd {
@@ -836,6 +836,21 @@ struct wmi_temp_sense_cmd {
} __packed;
/*
+ * WMI_PMC_CMDID
+ */
+enum wmi_pmc_op_e {
+ WMI_PMC_ALLOCATE = 0,
+ WMI_PMC_RELEASE = 1,
+};
+
+struct wmi_pmc_cmd {
+ u8 op; /* enum wmi_pmc_cmd_op_type */
+ u8 reserved;
+ __le16 ring_size;
+ __le64 mem_base;
+} __packed;
+
+/*
* WMI Events
*/
@@ -870,7 +885,7 @@ enum wmi_event_id {
WMI_VRING_CFG_DONE_EVENTID = 0x1821,
WMI_BA_STATUS_EVENTID = 0x1823,
WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
- WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
@@ -882,7 +897,7 @@ enum wmi_event_id {
WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
- WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
@@ -894,11 +909,12 @@ enum wmi_event_id {
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
- WMI_WBE_LINKDOWN_EVENTID = 0x1861,
+ WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+ WMI_VRING_EN_EVENTID = 0x1865,
WMI_UNIT_TEST_EVENTID = 0x1900,
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
@@ -1147,7 +1163,7 @@ struct wmi_vring_cfg_done_event {
} __packed;
/*
- * WMI_ADDBA_RESP_SENT_EVENTID
+ * WMI_RCP_ADDBA_RESP_SENT_EVENTID
*/
struct wmi_rcp_addba_resp_sent_event {
u8 cidxtid;
@@ -1179,7 +1195,7 @@ struct wmi_cfg_rx_chain_done_event {
} __packed;
/*
- * WMI_WBE_LINKDOWN_EVENTID
+ * WMI_WBE_LINK_DOWN_EVENTID
*/
enum wmi_wbe_link_down_event_reason {
WMI_WBE_REASON_USER_REQUEST = 0,
@@ -1202,6 +1218,14 @@ struct wmi_data_port_open_event {
} __packed;
/*
+ * WMI_VRING_EN_EVENTID
+ */
+struct wmi_vring_en_event {
+ u8 vring_index;
+ u8 reserved[3];
+} __packed;
+
+/*
* WMI_GET_PCP_CHANNEL_EVENTID
*/
struct wmi_get_pcp_channel_event {
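The wil6210 hunks above replace the per-CID linkup/linkdown handlers with a
single WMI_VRING_EN handler; since event delivery is table-driven, only the
id-to-handler array changes. A minimal userspace sketch of that dispatch
pattern follows; the 0x1865 id comes from the header above, all other names
and values are illustrative.

#include <stddef.h>
#include <stdio.h>

struct evt_entry { int id; void (*handler)(void *data, int len); };

static void evt_vring_en(void *data, int len)
{
	unsigned char vri = *(unsigned char *)data;

	(void)len;
	printf("Enable vring %u\n", vri);
}

static const struct evt_entry evt_table[] = {
	{ 0x1865 /* WMI_VRING_EN_EVENTID */, evt_vring_en },
};

static void dispatch(int id, void *data, int len)
{
	size_t i;

	for (i = 0; i < sizeof(evt_table) / sizeof(evt_table[0]); i++) {
		if (evt_table[i].id == id) {
			evt_table[i].handler(data, len);
			return;
		}
	}
	printf("unhandled event 0x%04x\n", id);
}

int main(void)
{
	unsigned char vring_index = 2;

	dispatch(0x1865, &vring_index, 1);
	return 0;
}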
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b2f9521fe551..f40992969b4a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3131,8 +3131,6 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
ctl |= B43_MACCTL_KEEP_BAD;
if (wl->filter_flags & FIF_PLCPFAIL)
ctl |= B43_MACCTL_KEEP_BADPLCP;
- if (wl->filter_flags & FIF_PROMISC_IN_BSS)
- ctl |= B43_MACCTL_PROMISC;
if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
ctl |= B43_MACCTL_BEACPROMISC;
@@ -4310,16 +4308,14 @@ static void b43_op_configure_filter(struct ieee80211_hw *hw,
goto out_unlock;
}
- *fflags &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
+ *fflags &= FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
FIF_OTHER_BSS |
FIF_BCN_PRBRESP_PROMISC;
- changed &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
+ changed &= FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index c77b7f59505c..39d49d6cd07f 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2055,8 +2055,6 @@ static void b43legacy_adjust_opmode(struct b43legacy_wldev *dev)
ctl |= B43legacy_MACCTL_KEEP_BAD;
if (wl->filter_flags & FIF_PLCPFAIL)
ctl |= B43legacy_MACCTL_KEEP_BADPLCP;
- if (wl->filter_flags & FIF_PROMISC_IN_BSS)
- ctl |= B43legacy_MACCTL_PROMISC;
if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
ctl |= B43legacy_MACCTL_BEACPROMISC;
@@ -2922,16 +2920,14 @@ static void b43legacy_op_configure_filter(struct ieee80211_hw *hw,
}
spin_lock_irqsave(&wl->irq_lock, flags);
- *fflags &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
+ *fflags &= FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
FIF_OTHER_BSS |
FIF_BCN_PRBRESP_PROMISC;
- changed &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
+ changed &= FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
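Both b43 drivers clamp the requested filter flags with a bitwise AND against
the set they support, so dropping FIF_PROMISC_IN_BSS from the mask is enough
to stop honoring it. A small sketch of that masking idiom, with made-up flag
values rather than mac80211's real ones:

#include <stdio.h>

#define FIF_ALLMULTI	0x01	/* illustrative values, not mac80211's */
#define FIF_FCSFAIL	0x02
#define FIF_CONTROL	0x04
#define FIF_OTHER_BSS	0x08

#define SUPPORTED_FILTERS \
	(FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | FIF_OTHER_BSS)

int main(void)
{
	unsigned int requested = FIF_ALLMULTI | 0x80;	/* 0x80: unsupported */
	unsigned int total = requested & SUPPORTED_FILTERS;

	printf("requested 0x%x -> applied 0x%x\n", requested, total);
	return 0;
}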
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 9b508bd3b839..71779b9e4bbe 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -33,6 +33,7 @@
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/acpi.h>
#include <net/cfg80211.h>
#include <defs.h>
@@ -1011,6 +1012,14 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
return 0;
}
+static void brcmf_sdiod_host_fixup(struct mmc_host *host)
+{
+ /* runtime-pm powers off the device */
+ pm_runtime_forbid(host->parent);
+ /* avoid removal detection upon resume */
+ host->caps |= MMC_CAP_NONREMOVABLE;
+}
+
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
struct sdio_func *func;
@@ -1076,7 +1085,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
ret = -ENODEV;
goto out;
}
- pm_runtime_forbid(host->parent);
+ brcmf_sdiod_host_fixup(host);
out:
if (ret)
brcmf_sdiod_remove(sdiodev);
@@ -1108,12 +1117,25 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
+static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
+ int val)
+{
+#if IS_ENABLED(CONFIG_ACPI)
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(dev);
+ if (adev)
+ adev->flags.power_manageable = 0;
+#endif
+}
+
static int brcmf_ops_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int err;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
+ struct device *dev;
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -1121,6 +1143,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
brcmf_dbg(SDIO, "Function#: %d\n", func->num);
+ dev = &func->dev;
+ /* prohibit ACPI power management for this device */
+ brcmf_sdiod_acpi_set_power_manageable(dev, 0);
+
/* Consume func num 1 but don't do anything with it. */
if (func->num == 1)
return 0;
@@ -1246,15 +1272,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
+ sdio_flags = MMC_PM_KEEP_POWER;
if (sdiodev->wowl_enabled) {
- sdio_flags = MMC_PM_KEEP_POWER;
if (sdiodev->pdata->oob_irq_supported)
enable_irq_wake(sdiodev->pdata->oob_irq_nr);
else
- sdio_flags = MMC_PM_WAKE_SDIO_IRQ;
- if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
- brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
}
+ if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+ brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
return 0;
}
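The suspend hunk above switches the flag handling from overwriting to
accumulating: MMC_PM_KEEP_POWER is now requested unconditionally and
MMC_PM_WAKE_SDIO_IRQ is OR-ed in only for wowl without an out-of-band
interrupt. A compilable sketch of the resulting combinations (flag values are
illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MMC_PM_KEEP_POWER	0x1	/* illustrative values */
#define MMC_PM_WAKE_SDIO_IRQ	0x2

static unsigned int suspend_flags(bool wowl_enabled, bool oob_irq_supported)
{
	unsigned int sdio_flags = MMC_PM_KEEP_POWER;	/* now unconditional */

	if (wowl_enabled && !oob_irq_supported)
		sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;	/* OR-ed, not assigned */
	return sdio_flags;
}

int main(void)
{
	printf("no wowl:       0x%x\n", suspend_flags(false, false));
	printf("wowl, no oob:  0x%x\n", suspend_flags(true, false));
	printf("wowl with oob: 0x%x\n", suspend_flags(true, true));
	return 0;
}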
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 8a15ebbce4a3..e10fa67010c0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -129,13 +129,47 @@ static struct ieee80211_rate __wl_rates[] = {
RATETAB_ENT(BRCM_RATE_54M, 0),
};
-#define wl_a_rates (__wl_rates + 4)
-#define wl_a_rates_size 8
#define wl_g_rates (__wl_rates + 0)
-#define wl_g_rates_size 12
+#define wl_g_rates_size ARRAY_SIZE(__wl_rates)
+#define wl_a_rates (__wl_rates + 4)
+#define wl_a_rates_size (wl_g_rates_size - 4)
+
+#define CHAN2G(_channel, _freq) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = IEEE80211_CHAN_DISABLED, \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = IEEE80211_CHAN_DISABLED, \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+ CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
+ CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
+ CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467),
+ CHAN2G(13, 2472), CHAN2G(14, 2484)
+};
+
+static struct ieee80211_channel __wl_5ghz_channels[] = {
+ CHAN5G(34), CHAN5G(36), CHAN5G(38), CHAN5G(40), CHAN5G(42),
+ CHAN5G(44), CHAN5G(46), CHAN5G(48), CHAN5G(52), CHAN5G(56),
+ CHAN5G(60), CHAN5G(64), CHAN5G(100), CHAN5G(104), CHAN5G(108),
+ CHAN5G(112), CHAN5G(116), CHAN5G(120), CHAN5G(124), CHAN5G(128),
+ CHAN5G(132), CHAN5G(136), CHAN5G(140), CHAN5G(144), CHAN5G(149),
+ CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
+};
/* Band templates duplicated per wiphy. The channel info
- * is filled in after querying the device.
+ * above is added to the band during setup.
*/
static const struct ieee80211_supported_band __wl_band_2ghz = {
.band = IEEE80211_BAND_2GHZ,
@@ -143,7 +177,7 @@ static const struct ieee80211_supported_band __wl_band_2ghz = {
.n_bitrates = wl_g_rates_size,
};
-static const struct ieee80211_supported_band __wl_band_5ghz_a = {
+static const struct ieee80211_supported_band __wl_band_5ghz = {
.band = IEEE80211_BAND_5GHZ,
.bitrates = wl_a_rates,
.n_bitrates = wl_a_rates_size,
@@ -1262,7 +1296,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
}
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
- GFP_KERNEL);
+ true, GFP_KERNEL);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
@@ -1928,7 +1962,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
- cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
scbval.val = cpu_to_le32(reason_code);
@@ -5253,40 +5287,6 @@ dongle_scantime_out:
return err;
}
-/* Filter the list of channels received from firmware counting only
- * the 20MHz channels. The wiphy band data only needs those which get
- * flagged to indicate if they can take part in higher bandwidth.
- */
-static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
- struct brcmf_chanspec_list *chlist,
- u32 chcnt[])
-{
- u32 total = le32_to_cpu(chlist->count);
- struct brcmu_chan ch;
- int i;
-
- for (i = 0; i < total; i++) {
- ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
- cfg->d11inf.decchspec(&ch);
-
- /* Firmware gives a ordered list. We skip non-20MHz
- * channels is 2G. For 5G we can abort upon reaching
- * a non-20MHz channel in the list.
- */
- if (ch.bw != BRCMU_CHAN_BW_20) {
- if (ch.band == BRCMU_CHAN_BAND_5G)
- break;
- else
- continue;
- }
-
- if (ch.band == BRCMU_CHAN_BAND_2G)
- chcnt[0] += 1;
- else if (ch.band == BRCMU_CHAN_BAND_5G)
- chcnt[1] += 1;
- }
-}
-
static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
struct brcmu_chan *ch)
{
@@ -5322,7 +5322,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
u32 i, j;
u32 total;
u32 chaninfo;
- u32 chcnt[2] = { 0, 0 };
u32 index;
pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
@@ -5339,42 +5338,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
goto fail_pbuf;
}
- brcmf_count_20mhz_channels(cfg, list, chcnt);
wiphy = cfg_to_wiphy(cfg);
- if (chcnt[0]) {
- band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
- GFP_KERNEL);
- if (band == NULL) {
- err = -ENOMEM;
- goto fail_pbuf;
- }
- band->channels = kcalloc(chcnt[0], sizeof(*channel),
- GFP_KERNEL);
- if (band->channels == NULL) {
- kfree(band);
- err = -ENOMEM;
- goto fail_pbuf;
- }
- band->n_channels = 0;
- wiphy->bands[IEEE80211_BAND_2GHZ] = band;
- }
- if (chcnt[1]) {
- band = kmemdup(&__wl_band_5ghz_a, sizeof(__wl_band_5ghz_a),
- GFP_KERNEL);
- if (band == NULL) {
- err = -ENOMEM;
- goto fail_band2g;
- }
- band->channels = kcalloc(chcnt[1], sizeof(*channel),
- GFP_KERNEL);
- if (band->channels == NULL) {
- kfree(band);
- err = -ENOMEM;
- goto fail_band2g;
- }
- band->n_channels = 0;
- wiphy->bands[IEEE80211_BAND_5GHZ] = band;
- }
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (band)
+ for (i = 0; i < band->n_channels; i++)
+ band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (band)
+ for (i = 0; i < band->n_channels; i++)
+ band->channels[i].flags = IEEE80211_CHAN_DISABLED;
total = le32_to_cpu(list->count);
for (i = 0; i < total; i++) {
@@ -5389,6 +5361,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
continue;
}
+ if (!band)
+ continue;
if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
ch.bw == BRCMU_CHAN_BW_40)
continue;
@@ -5416,9 +5390,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
} else if (ch.bw == BRCMU_CHAN_BW_40) {
brcmf_update_bw40_channel_flag(&channel[index], &ch);
} else {
- /* disable other bandwidths for now as mentioned
- * order assure they are enabled for subsequent
- * chanspecs.
+ /* enable the channel and disable other bandwidths
+ * for now, as the mentioned order assures they are
+ * enabled for subsequent chanspecs.
*/
channel[index].flags = IEEE80211_CHAN_NO_HT40 |
IEEE80211_CHAN_NO_80MHZ;
@@ -5437,16 +5411,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
IEEE80211_CHAN_NO_IR;
}
}
- if (index == band->n_channels)
- band->n_channels++;
}
- kfree(pbuf);
- return 0;
-fail_band2g:
- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
- wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
fail_pbuf:
kfree(pbuf);
return err;
@@ -5779,7 +5745,12 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
{
+ struct ieee80211_supported_band *band;
struct ieee80211_iface_combination ifc_combo;
+ __le32 bandlist[3];
+ u32 n_bands;
+ int err, i;
+
wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
@@ -5812,7 +5783,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
- brcmf_wiphy_pno_params(wiphy);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
+ brcmf_wiphy_pno_params(wiphy);
/* vendor commands/events support */
wiphy->vendor_commands = brcmf_vendor_cmds;
@@ -5821,7 +5793,52 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
brcmf_wiphy_wowl_params(wiphy);
- return brcmf_setup_wiphybands(wiphy);
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
+ sizeof(bandlist));
+ if (err) {
+ brcmf_err("could not obtain band info: err=%d\n", err);
+ return err;
+ }
+ /* first entry in bandlist is number of bands */
+ n_bands = le32_to_cpu(bandlist[0]);
+ for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) {
+ if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) {
+ band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
+ GFP_KERNEL);
+ if (!band)
+ return -ENOMEM;
+
+ band->channels = kmemdup(&__wl_2ghz_channels,
+ sizeof(__wl_2ghz_channels),
+ GFP_KERNEL);
+ if (!band->channels) {
+ kfree(band);
+ return -ENOMEM;
+ }
+
+ band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
+ wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ }
+ if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
+ band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
+ GFP_KERNEL);
+ if (!band)
+ return -ENOMEM;
+
+ band->channels = kmemdup(&__wl_5ghz_channels,
+ sizeof(__wl_5ghz_channels),
+ GFP_KERNEL);
+ if (!band->channels) {
+ kfree(band);
+ return -ENOMEM;
+ }
+
+ band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
+ wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ }
+ }
+ err = brcmf_setup_wiphybands(wiphy);
+ return err;
}
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -6007,11 +6024,18 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
memset(&ccreq, 0, sizeof(ccreq));
ccreq.rev = cpu_to_le32(-1);
memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
- brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
+ if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
+ brcmf_err("firmware rejected country setting\n");
+ return;
+ }
+ brcmf_setup_wiphybands(wiphy);
}
static void brcmf_free_wiphy(struct wiphy *wiphy)
{
+ if (!wiphy)
+ return;
+
kfree(wiphy->iface_combinations);
if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
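The cfg80211.c changes move channel setup from counting firmware chanspecs to
duplicating static templates: each wiphy gets its own copy of the channel
table, with every channel initially disabled until brcmf_construct_chaninfo()
enables what the firmware reports. A userspace sketch of that
duplicate-then-enable flow, using malloc/memcpy in place of kmemdup() and
invented channel data:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHAN_DISABLED 0x1

struct channel { int hw_value; unsigned int flags; };

static const struct channel tmpl_2g[] = {
	{ 1, CHAN_DISABLED }, { 6, CHAN_DISABLED }, { 11, CHAN_DISABLED },
};

int main(void)
{
	size_t i, n = sizeof(tmpl_2g) / sizeof(tmpl_2g[0]);
	struct channel *chans = malloc(sizeof(tmpl_2g));	/* kmemdup() stand-in */

	if (!chans)
		return 1;
	memcpy(chans, tmpl_2g, sizeof(tmpl_2g));

	chans[1].flags &= ~CHAN_DISABLED;	/* firmware reported channel 6 usable */
	for (i = 0; i < n; i++)
		printf("chan %d %s\n", chans[i].hw_value,
		       chans[i].flags & CHAN_DISABLED ? "disabled" : "enabled");
	free(chans);
	return 0;
}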
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index ab2fac8b2760..288f8314f208 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -649,6 +649,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43567_CHIP_ID:
case BRCM_CC_43569_CHIP_ID:
case BRCM_CC_43570_CHIP_ID:
+ case BRCM_CC_4358_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
return 0x180000;
default:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
index 77656c711bed..26c65872dae3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
@@ -22,17 +22,6 @@
#include "core.h"
#include "commonring.h"
-
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- * SEE ALSO msgbuf.c
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
int (*cr_ring_bell)(void *ctx),
int (*cr_update_rptr)(void *ctx),
@@ -206,14 +195,9 @@ int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
address = commonring->buf_addr;
address += (commonring->f_ptr * commonring->item_len);
if (commonring->f_ptr > commonring->w_ptr) {
- brcmf_dma_flush(address,
- (commonring->depth - commonring->f_ptr) *
- commonring->item_len);
address = commonring->buf_addr;
commonring->f_ptr = 0;
}
- brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) *
- commonring->item_len);
commonring->f_ptr = commonring->w_ptr;
@@ -258,8 +242,6 @@ void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
if (commonring->r_ptr == commonring->depth)
commonring->r_ptr = 0;
- brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len);
-
return ret_addr;
}
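The brcmf_dma_flush()/brcmf_dma_invalidate_cache() macros deleted above were
defined empty, so every call site already compiled to nothing; the same
cleanup repeats in msgbuf.c and pcie.c below. A tiny compilable illustration
of why such calls vanish (the no-op definition mirrors the deleted one):

#include <stdio.h>

/* expands to nothing, exactly like the deleted macro */
#define brcmf_dma_flush(addr, len)

int main(void)
{
	char buf[16];

	(void)buf;				/* silence the unused warning */
	brcmf_dma_flush(buf, sizeof(buf));	/* whole statement compiles away */
	printf("no flush code was emitted\n");
	return 0;
}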
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index 7748a1ccf14f..2c5fad3a3aa2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -124,6 +124,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
struct brcmf_if *ifp = drvr->iflist[0];
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
index f5832e077bb7..546962525cd2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
@@ -19,11 +19,15 @@
/*
* Features:
*
+ * MBSS: multiple BSSID support (e.g. guest network in AP mode).
* MCHAN: multi-channel for concurrent P2P.
+ * PNO: preferred network offload.
+ * WOWL: Wake-On-WLAN.
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
BRCMF_FEAT_DEF(MCHAN) \
+ BRCMF_FEAT_DEF(PNO) \
BRCMF_FEAT_DEF(WOWL)
/*
* Quirks:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 9cb99152ad17..7ae6461df932 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -23,6 +23,10 @@
#include "debug.h"
#include "firmware.h"
+#define BRCMF_FW_MAX_NVRAM_SIZE 64000
+#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
+#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
+
char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
module_param_string(firmware_path, brcmf_firmware_path,
BRCMF_FW_PATH_LEN, 0440);
@@ -46,6 +50,8 @@ enum nvram_parser_state {
* @column: current column in line.
* @pos: byte offset in input buffer.
* @entry: start position of key,value entry.
+ * @multi_dev_v1: detect pcie multi device v1 (compressed).
+ * @multi_dev_v2: detect pcie multi device v2.
*/
struct nvram_parser {
enum nvram_parser_state state;
@@ -56,8 +62,16 @@ struct nvram_parser {
u32 column;
u32 pos;
u32 entry;
+ bool multi_dev_v1;
+ bool multi_dev_v2;
};
+/**
+ * is_nvram_char() - check if char is a valid one for NVRAM entry
+ *
+ * It accepts all printable ASCII chars except for '#' which opens a comment.
+ * Please note that ' ' (space) while accepted is not a valid key name char.
+ */
static bool is_nvram_char(char c)
{
/* comment marker excluded */
@@ -65,7 +79,7 @@ static bool is_nvram_char(char c)
return false;
/* key and value may have any other readable character */
- return (c > 0x20 && c < 0x7f);
+ return (c >= 0x20 && c < 0x7f);
}
static bool is_whitespace(char c)
@@ -108,7 +122,11 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
st = COMMENT;
else
st = VALUE;
- } else if (!is_nvram_char(c)) {
+ if (strncmp(&nvp->fwnv->data[nvp->entry], "devpath", 7) == 0)
+ nvp->multi_dev_v1 = true;
+ if (strncmp(&nvp->fwnv->data[nvp->entry], "pcie/", 5) == 0)
+ nvp->multi_dev_v2 = true;
+ } else if (!is_nvram_char(c) || c == ' ') {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
return COMMENT;
@@ -133,6 +151,8 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
ekv = (u8 *)&nvp->fwnv->data[nvp->pos];
skv = (u8 *)&nvp->fwnv->data[nvp->entry];
cplen = ekv - skv;
+ if (nvp->nvram_len + cplen + 1 >= BRCMF_FW_MAX_NVRAM_SIZE)
+ return END;
/* copy to output buffer */
memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
nvp->nvram_len += cplen;
@@ -148,17 +168,20 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
static enum nvram_parser_state
brcmf_nvram_handle_comment(struct nvram_parser *nvp)
{
- char *eol, *sol;
+ char *eoc, *sol;
sol = (char *)&nvp->fwnv->data[nvp->pos];
- eol = strchr(sol, '\n');
- if (eol == NULL)
- return END;
+ eoc = strchr(sol, '\n');
+ if (!eoc) {
+ eoc = strchr(sol, '\0');
+ if (!eoc)
+ return END;
+ }
/* eat all moving to next line */
nvp->line++;
nvp->column = 1;
- nvp->pos += (eol - sol) + 1;
+ nvp->pos += (eoc - sol) + 1;
return IDLE;
}
@@ -180,10 +203,18 @@ static enum nvram_parser_state
static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
const struct firmware *nv)
{
+ size_t size;
+
memset(nvp, 0, sizeof(*nvp));
nvp->fwnv = nv;
+ /* Limit size to MAX_NVRAM_SIZE, some files contain a lot of comments */
+ if (nv->size > BRCMF_FW_MAX_NVRAM_SIZE)
+ size = BRCMF_FW_MAX_NVRAM_SIZE;
+ else
+ size = nv->size;
/* Alloc for extra 0 byte + roundup by 4 + length field */
- nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
+ size += 1 + 3 + sizeof(u32);
+ nvp->nvram = kzalloc(size, GFP_KERNEL);
if (!nvp->nvram)
return -ENOMEM;
@@ -192,12 +223,141 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
return 0;
}
+/* brcmf_fw_strip_multi_v1: Some nvram files contain settings for multiple
+ * devices. Strip them down to one device; use domain_nr/bus_nr to determine
+ * which data is to be returned. v1 is the version where nvram is stored
+ * compressed and "devpath" maps to an index for valid entries.
+ */
+static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
+ u16 bus_nr)
+{
+ /* Device path with a leading '=' key-value separator */
+ char pcie_path[] = "=pcie/?/?";
+ size_t pcie_len;
+
+ u32 i, j;
+ bool found;
+ u8 *nvram;
+ u8 id;
+
+ nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+ if (!nvram)
+ goto fail;
+
+ /* min length: devpath0=pcie/1/4/ + 0:x=y */
+ if (nvp->nvram_len < BRCMF_FW_NVRAM_DEVPATH_LEN + 6)
+ goto fail;
+
+ /* First search for the devpathX and see if it is the configuration
+ * for domain_nr/bus_nr. Search the complete nvp.
+ */
+ snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr,
+ bus_nr);
+ pcie_len = strlen(pcie_path);
+ found = false;
+ i = 0;
+ while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) {
+ /* Format: devpathX=pcie/Y/Z/
+ * Y = domain_nr, Z = bus_nr, X = virtual ID
+ */
+ if ((strncmp(&nvp->nvram[i], "devpath", 7) == 0) &&
+ (strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len) == 0)) {
+ id = nvp->nvram[i + 7] - '0';
+ found = true;
+ break;
+ }
+ while (nvp->nvram[i] != 0)
+ i++;
+ i++;
+ }
+ if (!found)
+ goto fail;
+
+ /* Now copy all valid entries, release old nvram and assign new one */
+ i = 0;
+ j = 0;
+ while (i < nvp->nvram_len) {
+ if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
+ i += 2;
+ while (nvp->nvram[i] != 0) {
+ nvram[j] = nvp->nvram[i];
+ i++;
+ j++;
+ }
+ nvram[j] = 0;
+ j++;
+ }
+ while (nvp->nvram[i] != 0)
+ i++;
+ i++;
+ }
+ kfree(nvp->nvram);
+ nvp->nvram = nvram;
+ nvp->nvram_len = j;
+ return;
+
+fail:
+ kfree(nvram);
+ nvp->nvram_len = 0;
+}
+
+/* brcmf_fw_strip_multi_v2: Some nvram files contain settings for multiple
+ * devices. Strip them down to one device; use domain_nr/bus_nr to determine
+ * which data is to be returned. v2 is the version where nvram is stored
+ * uncompressed; all relevant valid entries are identified by
+ * pcie/domain_nr/bus_nr:
+ */
+static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
+ u16 bus_nr)
+{
+ char prefix[BRCMF_FW_NVRAM_PCIEDEV_LEN];
+ size_t len;
+ u32 i, j;
+ u8 *nvram;
+
+ nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+ if (!nvram)
+ goto fail;
+
+ /* Copy all valid entries, release old nvram and assign new one.
+ * Valid entries are of type pcie/X/Y/ where X = domain_nr and
+ * Y = bus_nr.
+ */
+ snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr);
+ len = strlen(prefix);
+ i = 0;
+ j = 0;
+ while (i < nvp->nvram_len - len) {
+ if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
+ i += len;
+ while (nvp->nvram[i] != 0) {
+ nvram[j] = nvp->nvram[i];
+ i++;
+ j++;
+ }
+ nvram[j] = 0;
+ j++;
+ }
+ while (nvp->nvram[i] != 0)
+ i++;
+ i++;
+ }
+ kfree(nvp->nvram);
+ nvp->nvram = nvram;
+ nvp->nvram_len = j;
+ return;
+fail:
+ kfree(nvram);
+ nvp->nvram_len = 0;
+}
+
/* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
* and ending in a NUL. Removes carriage returns, empty lines, comment lines,
* and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
* End of buffer is completed with token identifying length of buffer.
*/
-static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
+static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length,
+ u16 domain_nr, u16 bus_nr)
{
struct nvram_parser nvp;
u32 pad;
@@ -212,6 +372,16 @@ static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
if (nvp.state == END)
break;
}
+ if (nvp.multi_dev_v1)
+ brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
+ else if (nvp.multi_dev_v2)
+ brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
+
+ if (nvp.nvram_len == 0) {
+ kfree(nvp.nvram);
+ return NULL;
+ }
+
pad = nvp.nvram_len;
*new_length = roundup(nvp.nvram_len + 1, 4);
while (pad != *new_length) {
@@ -239,6 +409,8 @@ struct brcmf_fw {
u16 flags;
const struct firmware *code;
const char *nvram_name;
+ u16 domain_nr;
+ u16 bus_nr;
void (*done)(struct device *dev, const struct firmware *fw,
void *nvram_image, u32 nvram_len);
};
@@ -254,7 +426,8 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
goto fail;
if (fw) {
- nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
+ nvram = brcmf_fw_nvram_strip(fw, &nvram_length,
+ fwctx->domain_nr, fwctx->bus_nr);
release_firmware(fw);
if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
goto fail;
@@ -309,11 +482,12 @@ fail:
kfree(fwctx);
}
-int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
- const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
- const struct firmware *fw,
- void *nvram_image, u32 nvram_len))
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len),
+ u16 domain_nr, u16 bus_nr)
{
struct brcmf_fw *fwctx;
@@ -333,8 +507,21 @@ int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
fwctx->done = fw_cb;
if (flags & BRCMF_FW_REQUEST_NVRAM)
fwctx->nvram_name = nvram;
+ fwctx->domain_nr = domain_nr;
+ fwctx->bus_nr = bus_nr;
return request_firmware_nowait(THIS_MODULE, true, code, dev,
GFP_KERNEL, fwctx,
brcmf_fw_request_code_done);
}
+
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len))
+{
+ return brcmf_fw_get_firmwares_pcie(dev, flags, code, nvram, fw_cb, 0,
+ 0);
+}
+
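brcmf_fw_strip_multi_v2() above walks the NUL-separated key=value entries the
parser produced and keeps only those carrying the pcie/<domain>/<bus>/ prefix,
stripping the prefix as it copies. A standalone sketch of that filtering loop
over a made-up two-entry buffer:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* two NUL-separated entries, as the parser lays them out */
	const char nv[] = "pcie/0/1/macaddr=00:11:22\0pcie/0/2/macaddr=aa:bb:cc\0";
	const char prefix[] = "pcie/0/1/";
	char out[sizeof(nv)] = "";
	size_t plen = strlen(prefix), i = 0, j = 0;

	while (i < sizeof(nv) - 1) {
		size_t entry_len = strlen(&nv[i]);

		if (strncmp(&nv[i], prefix, plen) == 0) {
			memcpy(&out[j], &nv[i] + plen, entry_len - plen + 1);
			j += entry_len - plen + 1;
		}
		i += entry_len + 1;
	}
	printf("kept: %s\n", out);	/* -> macaddr=00:11:22 */
	return 0;
}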
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
index 4d3482356b77..604dd48ab4e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
@@ -32,6 +32,12 @@ void brcmf_fw_nvram_free(void *nvram);
* fails it will not use the callback, but call device_release_driver()
* instead which will call the driver .remove() callback.
*/
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len),
+ u16 domain_nr, u16 bus_nr);
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
void (*fw_cb)(struct device *dev,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index eb1325371d3a..59440631fec5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -249,8 +249,8 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
}
-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
- struct sk_buff *skb)
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+ struct sk_buff *skb)
{
struct brcmf_flowring_ring *ring;
@@ -271,6 +271,7 @@ void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
brcmf_flowring_block(flow, flowid, false);
}
+ return skb_queue_len(&ring->skblist);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
index a34cd394c616..5551861a44bc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
@@ -64,8 +64,8 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
- struct sk_buff *skb);
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+ struct sk_buff *skb);
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index f0dda0ecd23b..5017eaa4af45 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -635,7 +635,7 @@ static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
return 0;
}
-static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
+static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
u32 slot_id, struct sk_buff **pktout,
bool remove_item)
{
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 65efb1468988..1b47de067d25 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -73,7 +73,7 @@
#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
-#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64
+#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
struct msgbuf_common_hdr {
@@ -278,16 +278,6 @@ struct brcmf_msgbuf_pktids {
struct brcmf_msgbuf_pktid *array;
};
-
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
@@ -462,7 +452,6 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
memcpy(msgbuf->ioctbuf, buf, buf_len);
else
memset(msgbuf->ioctbuf, 0, buf_len);
- brcmf_dma_flush(ioctl_buf, buf_len);
err = brcmf_commonring_write_complete(commonring);
brcmf_commonring_unlock(commonring);
@@ -795,6 +784,8 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
struct brcmf_flowring *flow = msgbuf->flow;
struct ethhdr *eh = (struct ethhdr *)(skb->data);
u32 flowid;
+ u32 queue_count;
+ bool force;
flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
if (flowid == BRCMF_FLOWRING_INVALID_ID) {
@@ -802,8 +793,9 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
if (flowid == BRCMF_FLOWRING_INVALID_ID)
return -ENOMEM;
}
- brcmf_flowring_enqueue(flow, flowid, skb);
- brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
+ queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
+ force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
+ brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
return 0;
}
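With brcmf_flowring_enqueue() now returning the queue depth, msgbuf.c
force-kicks the tx worker each time the depth crosses a multiple of the
trickle threshold instead of never forcing it. A sketch of that trigger
condition, with the threshold value copied from the hunk above:

#include <stdbool.h>
#include <stdio.h>

#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32	/* from the hunk above */

int main(void)
{
	unsigned int queue_count;

	for (queue_count = 30; queue_count <= 34; queue_count++) {
		bool force = (queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0;

		printf("depth %u -> force=%d\n", queue_count, force);
	}
	return 0;
}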
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
index c824570ddea3..03f35e0c52ca 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -39,10 +39,16 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
if (!sdiodev->pdata)
return;
+ if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
+ sdiodev->pdata->drive_strength = val;
+
+ /* make sure there are interrupts defined in the node */
+ if (!of_find_property(np, "interrupts", NULL))
+ return;
+
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
brcmf_err("interrupt could not be mapped\n");
- devm_kfree(dev, sdiodev->pdata);
return;
}
irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
@@ -50,7 +56,4 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
sdiodev->pdata->oob_irq_supported = true;
sdiodev->pdata->oob_irq_nr = irq;
sdiodev->pdata->oob_irq_flags = irqf;
-
- if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
- sdiodev->pdata->drive_strength = val;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 1831ecd0813e..37a2624d7bba 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -51,6 +51,8 @@ enum brcmf_pcie_state {
#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"
+#define BRCMF_PCIE_4358_FW_NAME "brcm/brcmfmac4358-pcie.bin"
+#define BRCMF_PCIE_4358_NVRAM_NAME "brcm/brcmfmac4358-pcie.txt"
#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
@@ -110,10 +112,11 @@ enum brcmf_pcie_state {
BRCMF_PCIE_MB_INT_D2H3_DB0 | \
BRCMF_PCIE_MB_INT_D2H3_DB1)
-#define BRCMF_PCIE_MIN_SHARED_VERSION 4
+#define BRCMF_PCIE_MIN_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION 5
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
-#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000
+#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
+#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
@@ -145,6 +148,10 @@ enum brcmf_pcie_state {
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
+#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
+#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
+#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
+#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
@@ -189,6 +196,8 @@ MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
struct brcmf_pcie_console {
@@ -244,6 +253,13 @@ struct brcmf_pciedev_info {
bool mbdata_completed;
bool irq_allocated;
bool wowl_enabled;
+ u8 dma_idx_sz;
+ void *idxbuf;
+ u32 idxbuf_sz;
+ dma_addr_t idxbuf_dmahandle;
+ u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
+ void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+ u16 value);
};
struct brcmf_pcie_ringbuf {
@@ -273,15 +289,6 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
};
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
{
@@ -329,6 +336,25 @@ brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
}
+static u16
+brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+ u16 *address = devinfo->idxbuf + mem_offset;
+
+ return (*(address));
+}
+
+
+static void
+brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+ u16 value)
+{
+ u16 *address = devinfo->idxbuf + mem_offset;
+
+ *(address) = value;
+}
+
+
static u32
brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
@@ -874,7 +900,7 @@ static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
commonring->w_ptr, ring->id);
- brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
+ devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
return 0;
}
@@ -892,7 +918,7 @@ static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
commonring->r_ptr, ring->id);
- brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
+ devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
return 0;
}
@@ -921,7 +947,7 @@ static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
return -EIO;
- commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
+ commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
commonring->w_ptr, ring->id);
@@ -939,7 +965,7 @@ static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
return -EIO;
- commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
+ commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
commonring->r_ptr, ring->id);
@@ -1044,6 +1070,13 @@ static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
}
kfree(devinfo->shared.flowrings);
devinfo->shared.flowrings = NULL;
+ if (devinfo->idxbuf) {
+ dma_free_coherent(&devinfo->pdev->dev,
+ devinfo->idxbuf_sz,
+ devinfo->idxbuf,
+ devinfo->idxbuf_dmahandle);
+ devinfo->idxbuf = NULL;
+ }
}
@@ -1059,19 +1092,72 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
u32 addr;
u32 ring_mem_ptr;
u32 i;
+ u64 address;
+ u32 bufsz;
u16 max_sub_queues;
+ u8 idx_offset;
ring_addr = devinfo->shared.ring_info_addr;
brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
+ addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
+ max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
+
+ if (devinfo->dma_idx_sz != 0) {
+ bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
+ devinfo->dma_idx_sz * 2;
+ devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
+ &devinfo->idxbuf_dmahandle,
+ GFP_KERNEL);
+ if (!devinfo->idxbuf)
+ devinfo->dma_idx_sz = 0;
+ }
- addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
- d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
- addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
- d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
- addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
- h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
- addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
- h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ if (devinfo->dma_idx_sz == 0) {
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
+ d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
+ d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
+ h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
+ h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ idx_offset = sizeof(u32);
+ devinfo->write_ptr = brcmf_pcie_write_tcm16;
+ devinfo->read_ptr = brcmf_pcie_read_tcm16;
+ brcmf_dbg(PCIE, "Using TCM indices\n");
+ } else {
+ memset(devinfo->idxbuf, 0, bufsz);
+ devinfo->idxbuf_sz = bufsz;
+ idx_offset = devinfo->dma_idx_sz;
+ devinfo->write_ptr = brcmf_pcie_write_idx;
+ devinfo->read_ptr = brcmf_pcie_read_idx;
+
+ h2d_w_idx_ptr = 0;
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
+ address = (u64)devinfo->idxbuf_dmahandle;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+ h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
+ address += max_sub_queues * idx_offset;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+ d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
+ address += max_sub_queues * idx_offset;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+ d2h_r_idx_ptr = d2h_w_idx_ptr +
+ BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
+ address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+ brcmf_dbg(PCIE, "Using host memory indices\n");
+ }
addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
@@ -1085,8 +1171,8 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring->id = i;
devinfo->shared.commonrings[i] = ring;
- h2d_w_idx_ptr += sizeof(u32);
- h2d_r_idx_ptr += sizeof(u32);
+ h2d_w_idx_ptr += idx_offset;
+ h2d_r_idx_ptr += idx_offset;
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
@@ -1100,13 +1186,11 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring->id = i;
devinfo->shared.commonrings[i] = ring;
- d2h_w_idx_ptr += sizeof(u32);
- d2h_r_idx_ptr += sizeof(u32);
+ d2h_w_idx_ptr += idx_offset;
+ d2h_r_idx_ptr += idx_offset;
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
- addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
- max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
devinfo->shared.nrof_flowrings =
max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
@@ -1130,15 +1214,15 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring);
ring->w_idx_addr = h2d_w_idx_ptr;
ring->r_idx_addr = h2d_r_idx_ptr;
- h2d_w_idx_ptr += sizeof(u32);
- h2d_r_idx_ptr += sizeof(u32);
+ h2d_w_idx_ptr += idx_offset;
+ h2d_r_idx_ptr += idx_offset;
}
devinfo->shared.flowrings = rings;
return 0;
fail:
- brcmf_err("Allocating commonring buffers failed\n");
+ brcmf_err("Allocating ring buffers failed\n");
brcmf_pcie_release_ringbuffers(devinfo);
return -ENOMEM;
}
@@ -1171,7 +1255,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
goto fail;
memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
- brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
@@ -1189,7 +1272,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
goto fail;
memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
- brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
@@ -1276,10 +1358,13 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
brcmf_err("Unsupported PCIE version %d\n", version);
return -EINVAL;
}
- if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
- brcmf_err("Unsupported legacy TX mode 0x%x\n",
- shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
- return -EINVAL;
+
+ /* check whether firmware supports dma indices */
+ if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
+ if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
+ devinfo->dma_idx_sz = sizeof(u16);
+ else
+ devinfo->dma_idx_sz = sizeof(u32);
}
addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
@@ -1333,6 +1418,10 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
fw_name = BRCMF_PCIE_43570_FW_NAME;
nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
break;
+ case BRCM_CC_4358_CHIP_ID:
+ fw_name = BRCMF_PCIE_4358_FW_NAME;
+ nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
+ break;
default:
brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
return -ENODEV;
@@ -1609,7 +1698,7 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
bus->msgbuf->commonrings[i] =
&devinfo->shared.commonrings[i]->commonring;
- flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
+ flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
GFP_KERNEL);
if (!flowrings)
goto fail;
@@ -1641,8 +1730,13 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct brcmf_pciedev_info *devinfo;
struct brcmf_pciedev *pcie_bus_dev;
struct brcmf_bus *bus;
+ u16 domain_nr;
+ u16 bus_nr;
- brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
+ domain_nr = pci_domain_nr(pdev->bus) + 1;
+ bus_nr = pdev->bus->number;
+ brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
+ domain_nr, bus_nr);
ret = -ENOMEM;
devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
@@ -1691,10 +1785,10 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto fail_bus;
- ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
- BRCMF_FW_REQ_NV_OPTIONAL,
- devinfo->fw_name, devinfo->nvram_name,
- brcmf_pcie_setup);
+ ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
+ BRCMF_FW_REQ_NV_OPTIONAL,
+ devinfo->fw_name, devinfo->nvram_name,
+ brcmf_pcie_setup, domain_nr, bus_nr);
if (ret == 0)
return 0;
fail_bus:
@@ -1850,9 +1944,11 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
{ /* end: all zeroes */ }
};
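The pcie.c rework binds read_ptr/write_ptr once at ring-buffer init, either to
TCM accessors when the firmware keeps indices in device memory or to
host-memory accessors when it advertises BRCMF_PCIE_SHARED_DMA_INDEX, so the
ring code never branches per access. A simplified userspace sketch of that
indirection (types flattened, only the host-memory pair shown):

#include <stdint.h>
#include <stdio.h>

struct devinfo {
	uint16_t idxbuf[8];	/* stands in for the DMA-coherent index buffer */
	uint16_t (*read_ptr)(struct devinfo *d, uint32_t off);
	void (*write_ptr)(struct devinfo *d, uint32_t off, uint16_t v);
};

static uint16_t read_idx(struct devinfo *d, uint32_t off)
{
	return d->idxbuf[off];
}

static void write_idx(struct devinfo *d, uint32_t off, uint16_t v)
{
	d->idxbuf[off] = v;
}

int main(void)
{
	struct devinfo d = { .read_ptr = read_idx, .write_ptr = write_idx };

	d.write_ptr(&d, 3, 42);	/* ring code never knows the backing store */
	printf("w_ptr = %u\n", d.read_ptr(&d, 3));
	return 0;
}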
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index ab0c89833013..bf7a8b1ad914 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -601,6 +601,8 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
+#define BCM43241B5_FIRMWARE_NAME "brcm/brcmfmac43241b5-sdio.bin"
+#define BCM43241B5_NVRAM_NAME "brcm/brcmfmac43241b5-sdio.txt"
#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
@@ -628,6 +630,8 @@ MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B5_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B5_NVRAM_NAME);
MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
@@ -667,7 +671,8 @@ enum brcmf_firmware_type {
static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
{ BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
- { BRCM_CC_43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+ { BRCM_CC_43241_CHIP_ID, 0x00000020, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+ { BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43241B5) },
{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
@@ -3550,10 +3555,6 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
return;
}
- if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
- brcmf_err("bus is down. we have nothing to do\n");
- return;
- }
/* Count the interrupt call */
bus->sdcnt.intrcount++;
if (in_interrupt())
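The firmware table rows above select by chip revision: a row matches when bit
(1 << chiprev) is set in its mask, so 0x00000020 now pins the B4 firmware to
rev 5 while 0xFFFFFFC0 routes rev 6 and later to the new B5 images. A runnable
sketch of that lookup; row contents are copied from the hunk, the helper name
is invented:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fw_row { uint32_t chip; uint32_t revmask; const char *fw; };

static const struct fw_row tbl[] = {
	{ 43241, 0x0000001F, "brcmfmac43241b0-sdio.bin" },
	{ 43241, 0x00000020, "brcmfmac43241b4-sdio.bin" },
	{ 43241, 0xFFFFFFC0, "brcmfmac43241b5-sdio.bin" },
};

static const char *pick_fw(uint32_t chip, uint32_t chiprev)
{
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (tbl[i].chip == chip && (tbl[i].revmask & (1u << chiprev)))
			return tbl[i].fw;
	return NULL;
}

int main(void)
{
	printf("rev 5 -> %s\n", pick_fw(43241, 5));
	printf("rev 6 -> %s\n", pick_fw(43241, 6));
	return 0;
}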
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 5df6aa72cc2d..daba86d881bc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1270,8 +1270,13 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->chiprev = bus_pub->chiprev;
/* request firmware here */
- brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
- brcmf_usb_probe_phase2);
+ ret = brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo),
+ NULL, brcmf_usb_probe_phase2);
+ if (ret) {
+ brcmf_err("firmware request failed: %d\n", ret);
+ goto fail;
+ }
+
return 0;
fail:
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 48135063347e..b46cab250615 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -41,8 +41,7 @@
#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
/* Flags we support */
-#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+#define MAC_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_CONTROL | \
FIF_OTHER_BSS | \
@@ -743,8 +742,6 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
changed_flags &= MAC_FILTERS;
*total_flags &= MAC_FILTERS;
- if (changed_flags & FIF_PROMISC_IN_BSS)
- brcms_dbg_info(core, "FIF_PROMISC_IN_BSS\n");
if (changed_flags & FIF_ALLMULTI)
brcms_dbg_info(core, "FIF_ALLMULTI\n");
if (changed_flags & FIF_FCSFAIL)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 369527e27689..9728be0e704b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3571,7 +3571,7 @@ void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
wlc->filter_flags = filter_flags;
- if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+ if (filter_flags & FIF_OTHER_BSS)
promisc_bits |= MCTL_PROMISC;
if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 4efdd51af9c8..7a6daa37dc6b 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -45,6 +45,7 @@
#define BRCM_CC_43567_CHIP_ID 43567
#define BRCM_CC_43569_CHIP_ID 43569
#define BRCM_CC_43570_CHIP_ID 43570
+#define BRCM_CC_4358_CHIP_ID 0x4358
#define BRCM_CC_43602_CHIP_ID 43602
/* USB Device IDs */
@@ -59,9 +60,11 @@
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
+#define BRCM_PCIE_4358_DEVICE_ID 0x43e9
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
+#define BRCM_PCIE_43602_RAW_DEVICE_ID 43602
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index b0f65fa09428..b86500b4418f 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -578,13 +578,11 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
{
struct cw1200_common *priv = dev->priv;
bool listening = !!(*total_flags &
- (FIF_PROMISC_IN_BSS |
- FIF_OTHER_BSS |
+ (FIF_OTHER_BSS |
FIF_BCN_PRBRESP_PROMISC |
FIF_PROBE_REQ));
- *total_flags &= FIF_PROMISC_IN_BSS |
- FIF_OTHER_BSS |
+ *total_flags &= FIF_OTHER_BSS |
FIF_FCSFAIL |
FIF_BCN_PRBRESP_PROMISC |
FIF_PROBE_REQ;
@@ -592,14 +590,12 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
down(&priv->scan.lock);
mutex_lock(&priv->conf_mutex);
- priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS)
- ? 1 : 0;
+ priv->rx_filter.promiscuous = 0;
priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS |
FIF_PROBE_REQ)) ? 1 : 0;
priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 1 : 0;
priv->disable_beacon_filter = !(*total_flags &
(FIF_BCN_PRBRESP_PROMISC |
- FIF_PROMISC_IN_BSS |
FIF_PROBE_REQ));
if (priv->listening != listening) {
priv->listening = listening;
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index e5665804d986..189cdf58084b 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3048,7 +3048,7 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
*total_flags);
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -3074,7 +3074,7 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
* filters into the device.
*/
*total_flags &=
- FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_OTHER_BSS | FIF_ALLMULTI |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 976f65fe9c38..e4b175cbeefd 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6166,7 +6166,7 @@ il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
*total_flags);
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -6192,7 +6192,7 @@ il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
* filters into the device.
*/
*total_flags &=
- FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_OTHER_BSS | FIF_ALLMULTI |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
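
The CHK() macro used in the three configure_filter() hunks above is defined locally in each of these drivers and is not shown in the diff. A self-contained sketch of the pattern (plain user-space C; the FIF_*/RXON_* values below are illustrative stand-ins, not the kernel's): every requested mac80211 filter flag ORs its hardware filter bit into one accumulator, every absent flag goes into a clear-mask, and both are then applied to the staging RXON filter.

    #include <stdio.h>
    #include <stdint.h>

    #define FIF_OTHER_BSS            (1u << 0)  /* stand-ins for the mac80211 flags */
    #define FIF_CONTROL              (1u << 1)
    #define RXON_FILTER_PROMISC_MSK  (1u << 0)  /* stand-ins for the RXON bits */
    #define RXON_FILTER_CTL2HOST_MSK (1u << 1)

    int main(void)
    {
        uint32_t total_flags = FIF_CONTROL;          /* what mac80211 asked for */
        uint32_t filter_or = 0, filter_nand = 0;
        uint32_t staging = RXON_FILTER_PROMISC_MSK;  /* current RXON filter */

    #define CHK(test, flag) do {            \
            if (total_flags & (test))       \
                filter_or |= (flag);        \
            else                            \
                filter_nand |= (flag);      \
        } while (0)

        CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
        CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
    #undef CHK

        staging &= ~filter_nand;                     /* drop unrequested bits */
        staging |= filter_or;                        /* set requested bits */
        printf("staging filter = 0x%x\n", (unsigned)staging);  /* 0x2 */
        return 0;
    }
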
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index f89f446e5c8a..aba095761ac6 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -21,6 +21,7 @@ config IWLWIFI
Intel 7260 Wi-Fi Adapter
Intel 3160 Wi-Fi Adapter
Intel 7265 Wi-Fi Adapter
+ Intel 8260 Wi-Fi Adapter
Intel 3165 Wi-Fi Adapter
@@ -54,16 +55,17 @@ config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
default IWLWIFI
help
- This is the driver that supports the DVM firmware which is
- used by most existing devices (with the exception of 7260
- and 3160).
+ This is the driver that supports the DVM firmware. The list
+ of the devices that use this firmware is available here:
+ https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
select WANT_DEV_COREDUMP
help
- This is the driver that supports the MVM firmware which is
- currently only available for 7260 and 3160 devices.
+ This is the driver that supports the MVM firmware. The list
+ of the devices that use this firmware is available here:
+ https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 3d32f4120174..dbfc5b18bcb7 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -9,6 +9,7 @@ iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
+iwlwifi-objs += iwl-trans.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 5abd62ed8cb4..852461ffe98f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -112,6 +112,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+ IEEE80211_HW_SUPPORT_FAST_XMIT |
IEEE80211_HW_WANT_MONITOR_VIF;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
@@ -1061,7 +1062,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
changed_flags, *total_flags);
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -1088,7 +1089,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
* since we currently do not support programming multicast
* filters into the device.
*/
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
@@ -1140,7 +1141,6 @@ static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
return;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
if (priv->lib->bt_params &&
priv->lib->bt_params->advanced_bt_coexist) {
@@ -1149,13 +1149,12 @@ static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
else if (event->u.rssi.data == RSSI_EVENT_HIGH)
priv->bt_enable_pspoll = false;
- iwlagn_send_advance_bt_config(priv);
+ queue_work(priv->workqueue, &priv->bt_runtime_config);
} else {
IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled, "
"ignoring RSSI callback\n");
}
- mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
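
The hunk above drops the mutex from the event callback and defers the BT config update to the driver's workqueue via queue_work(). A minimal user-space sketch of that defer pattern, with pthreads standing in for the kernel workqueue machinery (all names illustrative; this shows the hand-off structure, not the commit's stated rationale):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t priv_mutex = PTHREAD_MUTEX_INITIALIZER;
    static bool bt_enable_pspoll;

    /* worker context: sleeping and locking are allowed here */
    static void *bt_runtime_config(void *unused)
    {
        (void)unused;
        pthread_mutex_lock(&priv_mutex);
        printf("sending BT config, pspoll=%d\n", bt_enable_pspoll);
        pthread_mutex_unlock(&priv_mutex);
        return NULL;
    }

    /* event callback: record state cheaply, then hand off to the worker */
    static pthread_t rssi_event(bool rssi_low)
    {
        pthread_t worker;

        bt_enable_pspoll = rssi_low;
        pthread_create(&worker, NULL, bt_runtime_config, NULL); /* ~queue_work() */
        return worker;
    }

    int main(void)
    {
        pthread_join(rssi_event(true), NULL);
        return 0;
    }
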
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 74ad278116be..cc35f796d406 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 13
+#define IWL7260_UCODE_API_MAX 15
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 12
@@ -124,6 +124,28 @@ static const struct iwl_base_params iwl7000_base_params = {
.apmg_wake_up_wa = true,
};
+static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
+ .ct_kill_entry = 118,
+ .ct_kill_exit = 96,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 114,
+ .dynamic_smps_exit = 110,
+ .tx_protection_entry = 114,
+ .tx_protection_exit = 108,
+ .tx_backoff = {
+ {.temperature = 112, .backoff = 300},
+ {.temperature = 113, .backoff = 800},
+ {.temperature = 114, .backoff = 1500},
+ {.temperature = 115, .backoff = 3000},
+ {.temperature = 116, .backoff = 5000},
+ {.temperature = 117, .backoff = 10000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
+
static const struct iwl_ht_params iwl7000_ht_params = {
.stbc = true,
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
@@ -166,6 +188,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
+ .thermal_params = &iwl7000_high_temp_tt_params,
};
const struct iwl_cfg iwl7260_2n_cfg = {
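
The tx_backoff table added above is documented (in the iwl-config.h hunk later in this patch) as being in ascending temperature order. A sketch of how such a table can be consumed — pick the backoff of the highest threshold already reached; this illustrates the data layout, not the driver's exact throttling code:

    #include <stdio.h>

    struct tt_tx_backoff {
        int temperature;        /* threshold in Celsius */
        unsigned int backoff;   /* tx backoff in usec */
    };

    static const struct tt_tx_backoff tx_backoff[] = {
        { .temperature = 112, .backoff = 300 },
        { .temperature = 113, .backoff = 800 },
        { .temperature = 114, .backoff = 1500 },
        { .temperature = 115, .backoff = 3000 },
        { .temperature = 116, .backoff = 5000 },
        { .temperature = 117, .backoff = 10000 },
    };

    static unsigned int tt_backoff_for(int temp)
    {
        unsigned int backoff = 0;

        /* table is ascending: keep the last threshold we have reached */
        for (size_t i = 0; i < sizeof(tx_backoff) / sizeof(tx_backoff[0]); i++)
            if (temp >= tx_backoff[i].temperature)
                backoff = tx_backoff[i].backoff;
        return backoff;
    }

    int main(void)
    {
        printf("114C -> %u usec, 118C -> %u usec\n",
               tt_backoff_for(114), tt_backoff_for(118)); /* 1500, 10000 */
        return 0;
    }
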
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index ce6321b7d241..72040cd0b979 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 13
+#define IWL8000_UCODE_API_MAX 15
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 12
@@ -122,24 +122,49 @@ static const struct iwl_ht_params iwl8000_ht_params = {
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
-#define IWL_DEVICE_8000 \
- .ucode_api_max = IWL8000_UCODE_API_MAX, \
- .ucode_api_ok = IWL8000_UCODE_API_OK, \
- .ucode_api_min = IWL8000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_8000, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .base_params = &iwl8000_base_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
- .d0i3 = true, \
- .non_shared_ant = ANT_A, \
- .dccm_offset = IWL8260_DCCM_OFFSET, \
- .dccm_len = IWL8260_DCCM_LEN, \
- .dccm2_offset = IWL8260_DCCM2_OFFSET, \
- .dccm2_len = IWL8260_DCCM2_LEN, \
- .smem_offset = IWL8260_SMEM_OFFSET, \
- .smem_len = IWL8260_SMEM_LEN
+static const struct iwl_tt_params iwl8000_tt_params = {
+ .ct_kill_entry = 115,
+ .ct_kill_exit = 93,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 111,
+ .dynamic_smps_exit = 107,
+ .tx_protection_entry = 112,
+ .tx_protection_exit = 105,
+ .tx_backoff = {
+ {.temperature = 110, .backoff = 200},
+ {.temperature = 111, .backoff = 600},
+ {.temperature = 112, .backoff = 1200},
+ {.temperature = 113, .backoff = 2000},
+ {.temperature = 114, .backoff = 4000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
+
+#define IWL_DEVICE_8000 \
+ .ucode_api_max = IWL8000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL8000_UCODE_API_OK, \
+ .ucode_api_min = IWL8000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_8000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl8000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
+ .d0i3 = true, \
+ .non_shared_ant = ANT_A, \
+ .dccm_offset = IWL8260_DCCM_OFFSET, \
+ .dccm_len = IWL8260_DCCM_LEN, \
+ .dccm2_offset = IWL8260_DCCM2_OFFSET, \
+ .dccm2_len = IWL8260_DCCM2_LEN, \
+ .smem_offset = IWL8260_SMEM_OFFSET, \
+ .smem_len = IWL8260_SMEM_LEN, \
+ .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \
+ .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
+ .thermal_params = &iwl8000_tt_params, \
+ .apmg_not_supported = true
const struct iwl_cfg iwl8260_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 8260",
@@ -177,8 +202,6 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
- .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
- .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -192,8 +215,6 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
- .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
- .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3f33f753ce2f..08c14afeb148 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -195,6 +195,49 @@ struct iwl_ht_params {
};
/*
+ * Tx-backoff threshold
+ * @temperature: The threshold in Celsius
+ * @backoff: The tx-backoff in uSec
+ */
+struct iwl_tt_tx_backoff {
+ s32 temperature;
+ u32 backoff;
+};
+
+#define TT_TX_BACKOFF_SIZE 6
+
+/**
+ * struct iwl_tt_params - thermal throttling parameters
+ * @ct_kill_entry: CT Kill entry threshold
+ * @ct_kill_exit: CT Kill exit threshold
+ * @ct_kill_duration: The time interval (in uSec) at which the driver needs
+ * to check whether to exit CT Kill.
+ * @dynamic_smps_entry: Dynamic SMPS entry threshold
+ * @dynamic_smps_exit: Dynamic SMPS exit threshold
+ * @tx_protection_entry: TX protection entry threshold
+ * @tx_protection_exit: TX protection exit threshold
+ * @tx_backoff: Array of thresholds for tx-backoff, in ascending order.
+ * @support_ct_kill: Support CT Kill?
+ * @support_dynamic_smps: Support dynamic SMPS?
+ * @support_tx_protection: Support tx protection?
+ * @support_tx_backoff: Support tx-backoff?
+ */
+struct iwl_tt_params {
+ s32 ct_kill_entry;
+ s32 ct_kill_exit;
+ u32 ct_kill_duration;
+ s32 dynamic_smps_entry;
+ s32 dynamic_smps_exit;
+ s32 tx_protection_entry;
+ s32 tx_protection_exit;
+ struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
+ bool support_ct_kill;
+ bool support_dynamic_smps;
+ bool support_tx_protection;
+ bool support_tx_backoff;
+};
+
+/*
* information on how to parse the EEPROM
*/
#define EEPROM_REG_BAND_1_CHANNELS 0x08
@@ -316,6 +359,8 @@ struct iwl_cfg {
const u32 dccm2_len;
const u32 smem_offset;
const u32 smem_len;
+ const struct iwl_tt_params *thermal_params;
+ bool apmg_not_supported;
};
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
index 223b8752f924..948ce0802fa7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -64,19 +65,21 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
TRACE_EVENT(iwlwifi_dev_rx,
TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
- void *rxbuf, size_t len),
- TP_ARGS(dev, trans, rxbuf, len),
+ struct iwl_rx_packet *pkt, size_t len),
+ TP_ARGS(dev, trans, pkt, len),
TP_STRUCT__entry(
DEV_ENTRY
- __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
+ __field(u8, cmd)
+ __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
),
TP_fast_assign(
DEV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf,
- iwl_rx_trace_len(trans, rxbuf, len));
+ __entry->cmd = pkt->hdr.cmd;
+ memcpy(__get_dynamic_array(rxbuf), pkt,
+ iwl_rx_trace_len(trans, pkt, len));
),
TP_printk("[%s] RX cmd %#.2x",
- __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
+ __get_str(dev), __entry->cmd)
);
TRACE_EVENT(iwlwifi_dev_tx,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 7267152e7dc7..6685259927f8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -423,13 +423,19 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
{
const struct iwl_ucode_api *ucode_api = (void *)data;
u32 api_index = le32_to_cpu(ucode_api->api_index);
+ u32 api_flags = le32_to_cpu(ucode_api->api_flags);
+ int i;
- if (api_index >= IWL_API_ARRAY_SIZE) {
+ if (api_index >= IWL_API_MAX_BITS / 32) {
IWL_ERR(drv, "api_index larger than supported by driver\n");
- return -EINVAL;
+ /* don't return an error so we can load FW that has more bits */
+ return 0;
}
- capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
+ for (i = 0; i < 32; i++) {
+ if (api_flags & BIT(i))
+ __set_bit(i + 32 * api_index, capa->_api);
+ }
return 0;
}
@@ -439,13 +445,19 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
{
const struct iwl_ucode_capa *ucode_capa = (void *)data;
u32 api_index = le32_to_cpu(ucode_capa->api_index);
+ u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
+ int i;
- if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
+ if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
IWL_ERR(drv, "api_index larger than supported by driver\n");
- return -EINVAL;
+ /* don't return an error so we can load FW that has more bits */
+ return 0;
}
- capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
+ for (i = 0; i < 32; i++) {
+ if (api_flags & BIT(i))
+ __set_bit(i + 32 * api_index, capa->_capa);
+ }
return 0;
}
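
Both hunks above convert a 32-bit TLV flags word at position api_index into bits of one flat kernel bitmap, so the firmware can advertise more than 32 API/capability flags. A self-contained user-space sketch of the same arithmetic, with a uint64_t standing in for the unsigned long bitmap and has_api() mimicking the new fw_has_api() helper:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BITS      64    /* mirrors IWL_API_MAX_BITS */
    #define BITS_PER_WORD 32

    static uint64_t bitmap;     /* stands in for capa->_api / capa->_capa */

    static void set_api_flags(uint32_t api_index, uint32_t api_flags)
    {
        if (api_index >= MAX_BITS / BITS_PER_WORD)
            return;             /* ignore unknown words, as the hunk does */
        for (int i = 0; i < BITS_PER_WORD; i++)
            if (api_flags & (1u << i))
                bitmap |= 1ull << (i + BITS_PER_WORD * api_index);
    }

    static int has_api(unsigned int bit)    /* analogue of fw_has_api() */
    {
        return !!(bitmap & (1ull << bit));
    }

    int main(void)
    {
        set_api_flags(0, 1u << 20);  /* a flag in the first 32-bit word */
        set_api_flags(1, 1u << 0);   /* bit 0 of the second word -> bit 32 */
        printf("bit 20: %d, bit 32: %d\n", has_api(20), has_api(32));
        return 0;
    }
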
@@ -1148,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (err)
goto try_again;
- if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
+ if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
api_ver = drv->fw.ucode_ver;
else
api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
@@ -1239,6 +1251,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
sizeof(struct iwl_fw_dbg_trigger_txq_timer);
trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
sizeof(struct iwl_fw_dbg_trigger_time_event);
+ trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
+ sizeof(struct iwl_fw_dbg_trigger_ba);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
if (pieces->dbg_trigger_tlv[i]) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d45dc021cda2..d56064861a9c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 251bf8dc4a12..e57dbd0ef2e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* detection.
* @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
* events.
+ * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
@@ -267,6 +268,7 @@ enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_RSSI,
FW_DBG_TRIGGER_TXQ_TIMERS,
FW_DBG_TRIGGER_TIME_EVENT,
+ FW_DBG_TRIGGER_BA,
/* must be last */
FW_DBG_TRIGGER_MAX,
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 62db2e5e45eb..a9b5ae4ebec0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -237,6 +237,8 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
+typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
+
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
@@ -255,22 +257,27 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
* @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
+ * instead of 3.
*/
enum iwl_ucode_tlv_api {
- IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
- IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9),
- IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
- IWL_UCODE_TLV_API_TX_POWER_DEV = BIT(11),
- IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
- IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
- IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
- IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
- IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18),
- IWL_UCODE_TLV_API_STATS_V10 = BIT(19),
- IWL_UCODE_TLV_API_NEW_VERSION = BIT(20),
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
+ IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
+ IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10,
+ IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11,
+ IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13,
+ IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15,
+ IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16,
+ IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17,
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
+ IWL_UCODE_TLV_API_STATS_V10 = (__force iwl_ucode_tlv_api_t)19,
+ IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
+ IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
};
+typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
+
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
@@ -290,6 +297,7 @@ enum iwl_ucode_tlv_api {
* which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
* @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -299,22 +307,23 @@ enum iwl_ucode_tlv_api {
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
*/
enum iwl_ucode_tlv_capa {
- IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1),
- IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
- IWL_UCODE_TLV_CAPA_BEAMFORMER = BIT(3),
- IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
- IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
- IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
- IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
- IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
- IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
- IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
- IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22),
- IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28),
- IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = BIT(29),
- IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30),
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
+ IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
+ IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10,
+ IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11,
+ IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12,
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
+ IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
+ IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
+ IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
+ IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
};
/* The default calibrate table size if not specified by firmware file */
@@ -325,13 +334,14 @@ enum iwl_ucode_tlv_capa {
/* The default max probe length if not specified by the firmware file */
#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
+#define IWL_API_MAX_BITS 64
+#define IWL_CAPABILITIES_MAX_BITS 64
+
/*
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
#define IWL_UCODE_SECTION_MAX 12
-#define IWL_API_ARRAY_SIZE 1
-#define IWL_CAPABILITIES_ARRAY_SIZE 1
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
/* uCode version contains 4 values: Major/Minor/API/Serial */
@@ -424,11 +434,13 @@ struct iwl_fw_dbg_reg_op {
* @SMEM_MODE: monitor stores the data in SMEM
* @EXTERNAL_MODE: monitor stores the data in allocated DRAM
* @MARBH_MODE: monitor stores the data in MARBH buffer
+ * @MIPI_MODE: monitor outputs the data through the MIPI interface
*/
enum iwl_fw_dbg_monitor_mode {
SMEM_MODE = 0,
EXTERNAL_MODE = 1,
MARBH_MODE = 2,
+ MIPI_MODE = 3,
};
/**
@@ -436,6 +448,7 @@ enum iwl_fw_dbg_monitor_mode {
*
* @version: version of the TLV - currently 0
* @monitor_mode: %enum iwl_fw_dbg_monitor_mode
+ * @size_power: buffer size will be 2^(size_power + 11)
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
* @write_ptr_reg: the addr of the reg of the write pointer
@@ -449,7 +462,8 @@ enum iwl_fw_dbg_monitor_mode {
struct iwl_fw_dbg_dest_tlv {
u8 version;
u8 monitor_mode;
- u8 reserved[2];
+ u8 size_power;
+ u8 reserved;
__le32 base_reg;
__le32 end_reg;
__le32 write_ptr_reg;
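
The new size_power field above encodes the monitor buffer size as 2^(size_power + 11) bytes, so the smallest encodable buffer is 2 KiB. A quick worked enumeration of that formula (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        for (unsigned int size_power = 0; size_power <= 7; size_power++)
            printf("size_power=%u -> %u KiB\n", size_power,
                   (1u << (size_power + 11)) / 1024);
        return 0;    /* 0 -> 2 KiB, 1 -> 4 KiB, ..., 7 -> 256 KiB */
    }
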
@@ -659,6 +673,33 @@ struct iwl_fw_dbg_trigger_time_event {
} __packed;
/**
+ * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
+ * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is started.
+ * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is stopped.
+ * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is started.
+ * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is stopped.
+ * @rx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is received (for a Tx BlockAck session).
+ * @tx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is sent (for an Rx BlockAck session).
+ * @frame_timeout: tid bitmap to configure on what tid the trigger should occur
+ * when a frame times out in the reordering buffer.
+ */
+struct iwl_fw_dbg_trigger_ba {
+ __le16 rx_ba_start;
+ __le16 rx_ba_stop;
+ __le16 tx_ba_start;
+ __le16 tx_ba_stop;
+ __le16 rx_bar;
+ __le16 tx_bar;
+ __le16 frame_timeout;
+} __packed;
+
+/**
* struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
* @id: conf id
* @usniffer: should the uSniffer image be used
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index cf75bafae51d..3e3c9d8b3c37 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -105,10 +105,24 @@ struct iwl_ucode_capabilities {
u32 n_scan_channels;
u32 standard_phy_calibration_size;
u32 flags;
- u32 api[IWL_API_ARRAY_SIZE];
- u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
+ unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
+ unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
};
+static inline bool
+fw_has_api(const struct iwl_ucode_capabilities *capabilities,
+ iwl_ucode_tlv_api_t api)
+{
+ return test_bit((__force long)api, capabilities->_api);
+}
+
+static inline bool
+fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
+ iwl_ucode_tlv_capa_t capa)
+{
+ return test_bit((__force long)capa, capabilities->_capa);
+}
+
/* one for each uCode image (inst/data, init/runtime/wowlan) */
struct fw_desc {
const void *data; /* vmalloc'ed data */
@@ -205,6 +219,8 @@ static inline const char *get_fw_dbg_mode_string(int mode)
return "EXTERNAL_DRAM";
case MARBH_MODE:
return "MARBH";
+ case MIPI_MODE:
+ return "MIPI";
default:
return "UNKNOWN";
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 8e604a3931ca..80fefe7d7b8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -249,7 +249,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
*/
if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
(flags & IEEE80211_CHAN_NO_IR))
- flags |= IEEE80211_CHAN_GO_CONCURRENT;
+ flags |= IEEE80211_CHAN_IR_CONCURRENT;
return flags;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 88a57e6e232f..5af1c776d2d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -348,6 +348,9 @@ enum secure_load_status_reg {
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
+#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
+#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
+
#define DBGC_IN_SAMPLE (0xa03c00)
/* enable the ID buf for read */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
new file mode 100644
index 000000000000..9f8bcefc04c5
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include "iwl-trans.h"
+
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ struct device *dev,
+ const struct iwl_cfg *cfg,
+ const struct iwl_trans_ops *ops,
+ size_t dev_cmd_headroom)
+{
+ struct iwl_trans *trans;
+#ifdef CONFIG_LOCKDEP
+ static struct lock_class_key __key;
+#endif
+
+ trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
+ if (!trans)
+ return NULL;
+
+#ifdef CONFIG_LOCKDEP
+ lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+ &__key, 0);
+#endif
+
+ trans->dev = dev;
+ trans->cfg = cfg;
+ trans->ops = ops;
+ trans->dev_cmd_headroom = dev_cmd_headroom;
+
+ snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+ "iwl_cmd_pool:%s", dev_name(trans->dev));
+ trans->dev_cmd_pool =
+ kmem_cache_create(trans->dev_cmd_pool_name,
+ sizeof(struct iwl_device_cmd)
+ + trans->dev_cmd_headroom,
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!trans->dev_cmd_pool)
+ goto free;
+
+ return trans;
+ free:
+ kfree(trans);
+ return NULL;
+}
+
+void iwl_trans_free(struct iwl_trans *trans)
+{
+ kmem_cache_destroy(trans->dev_cmd_pool);
+ kfree(trans);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 56254a837214..87a230a7f4b6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -641,6 +641,8 @@ struct iwl_trans {
enum iwl_d0i3_mode d0i3_mode;
+ bool wowlan_d0i3;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -1011,19 +1013,19 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
}
/*****************************************************
+ * transport helper functions
+ *****************************************************/
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ struct device *dev,
+ const struct iwl_cfg *cfg,
+ const struct iwl_trans_ops *ops,
+ size_t dev_cmd_headroom);
+void iwl_trans_free(struct iwl_trans *trans);
+
+/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
-static inline void trans_lockdep_init(struct iwl_trans *trans)
-{
-#ifdef CONFIG_LOCKDEP
- static struct lock_class_key __key;
-
- lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
- &__key, 0);
-#endif
-}
-
#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 13a0a03158de..b4737e296c92 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -408,23 +408,12 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
- struct iwl_bt_coex_cmd *bt_cmd;
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- int ret;
+ struct iwl_bt_coex_cmd bt_cmd = {};
u32 mode;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_send_bt_init_conf_old(mvm);
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
- if (!bt_cmd)
- return -ENOMEM;
- cmd.data[0] = bt_cmd;
-
lockdep_assert_held(&mvm->mutex);
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -440,36 +429,33 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
mode = 0;
}
- bt_cmd->mode = cpu_to_le32(mode);
+ bt_cmd.mode = cpu_to_le32(mode);
goto send_cmd;
}
mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
- bt_cmd->mode = cpu_to_le32(mode);
+ bt_cmd.mode = cpu_to_le32(mode);
if (IWL_MVM_BT_COEX_SYNC2SCO)
- bt_cmd->enabled_modules |=
+ bt_cmd.enabled_modules |=
cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
if (iwl_mvm_bt_is_plcr_supported(mvm))
- bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
if (IWL_MVM_BT_COEX_MPLUT) {
- bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
- bt_cmd->enabled_modules |=
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+ bt_cmd.enabled_modules |=
cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
}
- bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
send_cmd:
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
- ret = iwl_mvm_send_cmd(mvm, &cmd);
-
- kfree(bt_cmd);
- return ret;
+ return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
}
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
@@ -746,7 +732,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
@@ -770,52 +756,14 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
return 0;
}
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
-
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
-
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- /* If channel context is invalid or not on 2.4GHz - don't count it */
- if (!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
-
- if (vif->type != NL80211_IFTYPE_STATION ||
- mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
- return;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /* This can happen if the station has been removed right now */
- if (IS_ERR_OR_NULL(sta))
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data data = {
- .mvm = mvm,
- };
int ret;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
return;
}
@@ -853,10 +801,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bt_rssi_iterator, &data);
}
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
@@ -870,7 +814,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -897,7 +841,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -927,7 +871,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
if (ant & mvm->cfg->non_shared_ant)
return true;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
@@ -940,10 +884,10 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
if (mvm->cfg->bt_shared_single_ant)
return true;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
- return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF;
+ return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
@@ -951,7 +895,7 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
{
u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
if (band != IEEE80211_BAND_2GHZ)
@@ -994,7 +938,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_bt_coex_vif_change_old(mvm);
return;
}
@@ -1012,7 +957,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
if (!iwl_mvm_bt_is_plcr_supported(mvm))
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 4310cf102d78..4165d104e4c3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -761,7 +761,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
- iwl_mvm_cancel_scan(mvm);
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
iwl_trans_stop_device(mvm->trans);
@@ -981,7 +981,8 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
if (ret)
return ret;
- ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
+ ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
+ IWL_MVM_SCAN_NETDETECT);
if (ret)
return ret;
@@ -1169,7 +1170,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
iwl_trans_suspend(mvm->trans);
- if (wowlan->any) {
+ mvm->trans->wowlan_d0i3 = wowlan->any;
+ if (mvm->trans->wowlan_d0i3) {
/* 'any' trigger means d0i3 usage */
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
int ret = iwl_mvm_enter_d0i3_sync(mvm);
@@ -1784,7 +1786,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
struct iwl_scan_offload_profile_match *fw_match;
struct cfg80211_wowlan_nd_match *match;
- int n_channels = 0;
+ int idx, n_channels = 0;
fw_match = &query.matches[i];
@@ -1799,8 +1801,12 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
net_detect->matches[net_detect->n_matches++] = match;
- match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
- memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
+ /* We inverted the order of the SSIDs in the scan
+ * request, so invert the index here.
+ */
+ idx = mvm->n_nd_match_sets - i - 1;
+ match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
+ memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
match->ssid.ssid_len);
if (mvm->n_nd_channels < n_channels)
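
The idx computation above undoes an SSID-order inversion performed when the scan request was built. A one-line check of the mapping (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        const int n_nd_match_sets = 3;

        /* firmware match slot i refers to original match set n - i - 1 */
        for (int i = 0; i < n_nd_match_sets; i++)
            printf("fw match %d -> nd_match_sets[%d]\n",
                   i, n_nd_match_sets - i - 1);
        return 0;    /* prints 0->2, 1->1, 2->0 */
    }
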
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 5f37eab5008d..5c8a65de0e77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -190,6 +190,21 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
return ret ?: count;
}
+static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ char buf[64];
+ int bufsz = sizeof(buf);
+ int pos;
+
+ pos = scnprintf(buf, bufsz, "bss limit = %d\n",
+ vif->bss_conf.txpower);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -607,6 +622,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
} while (0)
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
@@ -641,6 +657,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 9ac04c1ea706..ffb4b5cef275 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -493,7 +493,8 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
struct iwl_bt_coex_profile_notif_old *notif =
&mvm->last_bt_notif_old;
@@ -550,7 +551,8 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
pos += scnprintf(buf+pos, bufsz-pos,
@@ -916,7 +918,8 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
if (mvm->scan_rx_ant != scan_rx_ant) {
mvm->scan_rx_ant = scan_rx_ant;
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
}
@@ -1356,6 +1359,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
PRINT_MVM_REF(IWL_MVM_REF_SCAN);
PRINT_MVM_REF(IWL_MVM_REF_ROC);
+ PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
PRINT_MVM_REF(IWL_MVM_REF_USER);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index d6cced47d561..5e4cbdb44c60 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -274,50 +274,18 @@ struct iwl_scan_offload_profile_cfg {
} __packed;
/**
- * iwl_scan_offload_schedule - schedule of scan offload
+ * iwl_scan_schedule_lmac - schedule of scan offload
* @delay: delay between iterations, in seconds.
* @iterations: num of scan iterations
* @full_scan_mul: number of partial scans before each full scan
*/
-struct iwl_scan_offload_schedule {
+struct iwl_scan_schedule_lmac {
__le16 delay;
u8 iterations;
u8 full_scan_mul;
-} __packed;
-
-/*
- * iwl_scan_offload_flags
- *
- * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
- * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
- * beacon period. Finding channel activity in this mode is not guaranteed.
- * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
- * Assuming beacon period is 100ms finding channel activity is guaranteed.
- */
-enum iwl_scan_offload_flags {
- IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
- IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
- IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5),
- IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
-};
-
-/**
- * iwl_scan_offload_req - scan offload request command
- * @flags: bitmap - enum iwl_scan_offload_flags.
- * @watchdog: maximum scan duration in TU.
- * @delay: delay in seconds before first iteration.
- * @schedule_line: scan offload schedule, for fast and regular scan.
- */
-struct iwl_scan_offload_req {
- __le16 flags;
- __le16 watchdog;
- __le16 delay;
- __le16 reserved;
- struct iwl_scan_offload_schedule schedule_line[2];
-} __packed;
+} __packed; /* SCAN_SCHEDULE_API_S */
-enum iwl_scan_offload_compleate_status {
+enum iwl_scan_offload_complete_status {
IWL_SCAN_OFFLOAD_COMPLETED = 1,
IWL_SCAN_OFFLOAD_ABORTED = 2,
};
@@ -326,6 +294,7 @@ enum iwl_scan_ebs_status {
IWL_SCAN_EBS_SUCCESS,
IWL_SCAN_EBS_FAILED,
IWL_SCAN_EBS_CHAN_NOT_FOUND,
+ IWL_SCAN_EBS_INACTIVE,
};
/**
@@ -463,8 +432,19 @@ enum iwl_scan_priority {
IWL_SCAN_PRIORITY_HIGH,
};
+enum iwl_scan_priority_ext {
+ IWL_SCAN_PRIORITY_EXT_0_LOWEST,
+ IWL_SCAN_PRIORITY_EXT_1,
+ IWL_SCAN_PRIORITY_EXT_2,
+ IWL_SCAN_PRIORITY_EXT_3,
+ IWL_SCAN_PRIORITY_EXT_4,
+ IWL_SCAN_PRIORITY_EXT_5,
+ IWL_SCAN_PRIORITY_EXT_6,
+ IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
+};
+
/**
- * iwl_scan_req_unified_lmac - SCAN_REQUEST_CMD_API_S_VER_1
+ * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
* @reserved1: for alignment and future use
* @channel_num: num of channels to scan
* @active_dwell: dwell time for active channels
@@ -487,7 +467,7 @@ enum iwl_scan_priority {
* @channel_opt: channel optimization options, for full and partial scan
* @data: channel configuration and probe request packet.
*/
-struct iwl_scan_req_unified_lmac {
+struct iwl_scan_req_lmac {
/* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
__le32 reserved1;
u8 n_channels;
@@ -508,7 +488,7 @@ struct iwl_scan_req_unified_lmac {
/* SCAN_REQ_PERIODIC_PARAMS_API_S */
__le32 iter_num;
__le32 delay;
- struct iwl_scan_offload_schedule schedule[2];
+ struct iwl_scan_schedule_lmac schedule[2];
struct iwl_scan_channel_opt channel_opt[2];
u8 data[];
} __packed;
@@ -582,7 +562,11 @@ struct iwl_mvm_umac_cmd_hdr {
u8 ver;
} __packed;
-#define IWL_MVM_MAX_SIMULTANEOUS_SCANS 8
+/* The maximum of either of these cannot exceed 8, because we use an
+ * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
+ */
+#define IWL_MVM_MAX_UMAC_SCANS 8
+#define IWL_MVM_MAX_LMAC_SCANS 1
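
The constraint in the comment above can be pinned at compile time. The driver itself does exactly this with BUILD_BUG_ON()/HWEIGHT32() in iwl_mvm_mac_setup_register() (see the mac80211.c hunk further down); the standalone helper below is only a restatement of that check.

```c
/* Restating the invariant: each in-flight scan occupies one bit of
 * the 8-bit IWL_MVM_SCAN_MASK, so neither maximum may exceed the
 * mask's population count.  BUILD_BUG_ON() needs function scope.
 */
static inline void example_check_scan_limits(void)
{
	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
	BUILD_BUG_ON(IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
}
```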
enum scan_config_flags {
SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
@@ -865,4 +849,27 @@ struct iwl_scan_offload_profiles_query {
struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+/**
+ * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ * results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_umac_scan_iter_complete_notif {
+ __le32 uid;
+ u8 scanned_channels;
+ u8 status;
+ u8 bt_status;
+ u8 last_channel;
+ __le32 tsf_low;
+ __le32 tsf_high;
+ struct iwl_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
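A hedged sketch of consuming this notification: only the first scanned_channels entries of the flexible results[] array are valid, so a handler bounds its walk accordingly. The handler name below is hypothetical; the real one added by this patch is iwl_mvm_rx_umac_scan_iter_complete_notif() in scan.c.

```c
/* Hypothetical handler skeleton for the iteration-complete notif;
 * it trusts scanned_channels as the element count of results[].
 */
static void example_handle_umac_iter(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
	int i;

	for (i = 0; i < notif->scanned_channels; i++)
		IWL_DEBUG_SCAN(mvm, "iteration result: channel %u\n",
			       notif->results[i].channel);
}
```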
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 01b1da6ad359..16e9ef49397f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,6 +108,7 @@ enum {
ANTENNA_COUPLING_NOTIFICATION = 0xa,
/* UMAC scan commands */
+ SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
SCAN_CFG_CMD = 0xc,
SCAN_REQ_UMAC = 0xd,
SCAN_ABORT_UMAC = 0xe,
@@ -147,13 +148,6 @@ enum {
LQ_CMD = 0x4e,
- /* Calibration */
- TEMPERATURE_NOTIFICATION = 0x62,
- CALIBRATION_CFG_CMD = 0x65,
- CALIBRATION_RES_NOTIFICATION = 0x66,
- CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
- RADIO_VERSION_NOTIFICATION = 0x68,
-
/* Scan offload */
SCAN_OFFLOAD_REQUEST_CMD = 0x51,
SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -177,12 +171,8 @@ enum {
/* Thermal Throttling*/
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
- /* Scanning */
- SCAN_REQUEST_CMD = 0x80,
- SCAN_ABORT_CMD = 0x81,
- SCAN_START_NOTIFICATION = 0x82,
- SCAN_RESULTS_NOTIFICATION = 0x83,
- SCAN_COMPLETE_NOTIFICATION = 0x84,
+ /* Set/Get DC2DC frequency tune */
+ DC2DC_CONFIG_CMD = 0x83,
/* NVM */
NVM_ACCESS_CMD = 0x88,
@@ -1402,6 +1392,49 @@ struct iwl_mvm_marker {
__le32 metadata[0];
} __packed; /* MARKER_API_S_VER_1 */
+/*
+ * enum iwl_dc2dc_config_id - flag ids
+ *
+ * Ids of dc2dc configuration flags
+ */
+enum iwl_dc2dc_config_id {
+ DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */
+ DCDC_FREQ_TUNE_SET = 0x2,
+}; /* MARKER_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_cmd - configure dc2dc values
+ *
+ * (DC2DC_CONFIG_CMD = 0x83)
+ *
+ * Set/Get & configure dc2dc values.
+ * The command always returns the current dc2dc values.
+ *
+ * @flags: set/get dc2dc
+ * @enable_low_power_mode: not used.
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_cmd {
+ __le32 flags;
+ __le32 enable_low_power_mode; /* not used */
+ __le32 dc2dc_freq_tune0;
+ __le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
+ *
+ * Current dc2dc values returned by the FW.
+ *
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_resp {
+ __le32 dc2dc_freq_tune0;
+ __le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
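
For context, a hedged sketch of issuing the new command as a pure "get": with flags cleared nothing is changed, and (per the kerneldoc) the firmware still replies with the current dividers. Actually reading the iwl_dc2dc_config_resp payload back would require sending with CMD_WANT_SKB; that plumbing is elided here and the helper name is hypothetical.

```c
/* Minimal fire-and-forget "get" of the dc2dc configuration; the
 * response parsing (CMD_WANT_SKB) is intentionally left out.
 */
static int example_dc2dc_get(struct iwl_mvm *mvm)
{
	struct iwl_dc2dc_config_cmd cmd = {
		.flags = cpu_to_le32(0),  /* no set bits: query only */
	};

	return iwl_mvm_send_cmd_pdu(mvm, DC2DC_CONFIG_CMD, 0,
				    sizeof(cmd), &cmd);
}
```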
+
/***********************************
* Smart Fifo API
***********************************/
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index df869633f4dd..eb10c5ee4a14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -623,7 +623,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
if (!mvm->trans->ltr_enabled)
return 0;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0))
return iwl_mvm_config_ltr_v1(mvm);
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
@@ -662,9 +662,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 * device that are triggered by the INIT firmware (MFUART).
*/
_iwl_trans_stop_device(mvm->trans, false);
- _iwl_trans_start_hw(mvm->trans, false);
+ ret = _iwl_trans_start_hw(mvm->trans, false);
if (ret)
- return ret;
+ goto error;
}
if (iwlmvm_mod_params.init_dbg)
@@ -754,7 +754,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
ret = iwl_mvm_config_scan(mvm);
if (ret)
goto error;
@@ -832,21 +832,6 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
return 0;
}
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_radio_version_notif *radio_version = (void *)pkt->data;
-
- /* TODO: what to do with that? */
- IWL_DEBUG_INFO(mvm,
- "Radio version: flavor: 0x%08x, step 0x%08x, dash 0x%08x\n",
- le32_to_cpu(radio_version->radio_flavor),
- le32_to_cpu(radio_version->radio_step),
- le32_to_cpu(radio_version->radio_dash));
- return 0;
-}
-
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
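A recurring pattern in the hunks that follow is the replacement of open-coded `ucode_capa.capa[0] & FLAG` tests with fw_has_capa()/fw_has_api(). As a rough sketch of what such helpers do (the real definitions live in the iwlwifi firmware-file headers, and the bitmap field names here are assumptions):

```c
/* Hedged sketch: capability/API checks as bit tests over bitmaps,
 * so flags are no longer limited to the first 32-bit word.
 */
static inline bool example_fw_has_capa(const struct iwl_ucode_capabilities *c,
				       unsigned long capa)
{
	return test_bit(capa, c->_capa);   /* field name assumed */
}

static inline bool example_fw_has_api(const struct iwl_ucode_capabilities *c,
				      unsigned long api)
{
	return test_bit(api, c->_api);     /* field name assumed */
}
```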
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index dda9f7b5f342..08367fbc3bc4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -80,7 +80,6 @@
#include "sta.h"
#include "time-event.h"
#include "iwl-eeprom-parse.h"
-#include "fw-api-scan.h"
#include "iwl-phy-db.h"
#include "testmode.h"
#include "iwl-fw-error-dump.h"
@@ -319,7 +318,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
if (IS_ERR_OR_NULL(resp)) {
IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
- PTR_RET(resp));
+ PTR_ERR_OR_ZERO(resp));
goto out;
}
@@ -335,7 +334,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
kfree(resp);
if (IS_ERR_OR_NULL(regd)) {
IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
- PTR_RET(regd));
+ PTR_ERR_OR_ZERO(regd));
goto out;
}
@@ -416,6 +415,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
int num_mac, ret, i;
+ static const u32 mvm_ciphers[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ };
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
@@ -429,6 +434,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_TIMING_BEACON_ONLY |
IEEE80211_HW_CONNECTION_MONITOR |
IEEE80211_HW_CHANCTX_STA_CSA |
+ IEEE80211_HW_SUPPORT_FAST_XMIT |
IEEE80211_HW_SUPPORTS_CLONED_SKBS;
hw->queues = mvm->first_agg_queue;
@@ -441,19 +447,38 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
+ memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
+ hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
+ hw->wiphy->cipher_suites = mvm->ciphers;
+
/*
* Enable 11w if advertised by firmware and software crypto
* is not enabled (as the firmware will interpret some mgmt
* packets, so enabling it with software crypto isn't safe)
*/
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
- !iwlwifi_mod_params.sw_crypto)
+ !iwlwifi_mod_params.sw_crypto) {
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_AES_CMAC;
+ hw->wiphy->n_cipher_suites++;
+ }
+
+ /* currently FW API supports only one optional cipher scheme */
+ if (mvm->fw->cs[0].cipher) {
+ mvm->hw->n_cipher_schemes = 1;
+ mvm->hw->cipher_schemes = &mvm->fw->cs[0];
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ mvm->fw->cs[0].cipher;
+ hw->wiphy->n_cipher_suites++;
+ }
hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
- NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -506,10 +531,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
- hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm, false);
+ hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
+ BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+ IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
+ else
+ mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
+
if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
@@ -517,10 +551,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
- if ((mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
- (mvm->fw->ucode_capa.api[0] &
- IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS))
hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
}
@@ -532,14 +566,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
- hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
- hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
- /* we create the 802.11 header and zero length SSID IE. */
- hw->wiphy->max_sched_scan_ie_len =
- SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
- }
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ /* we create the 802.11 header and zero length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len =
+ SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -548,30 +580,24 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_FEATURE_STATIC_SMPS |
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_QUIET;
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
hw->wiphy->features |=
NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
- /* currently FW API supports only one optional cipher scheme */
- if (mvm->fw->cs[0].cipher) {
- mvm->hw->n_cipher_schemes = 1;
- mvm->hw->cipher_schemes = &mvm->fw->cs[0];
- }
-
#ifdef CONFIG_PM_SLEEP
if (iwl_mvm_is_d0i3_supported(mvm) &&
device_can_wakeup(mvm->trans->dev)) {
@@ -611,13 +637,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
return ret;
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
}
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
}
@@ -730,6 +757,60 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
return true;
}
+#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
+ do { \
+ if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
+ break; \
+ iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
+ } while (0)
+
+static void
+iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
+ enum ieee80211_ampdu_mlme_action action)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+ return;
+
+ switch (action) {
+ case IEEE80211_AMPDU_TX_OPERATIONAL: {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
+ "TX AGG START: MAC %pM tid %d ssn %d\n",
+ sta->addr, tid, tid_data->ssn);
+ break;
+ }
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
+ "TX AGG STOP: MAC %pM tid %d\n",
+ sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
+ "RX AGG START: MAC %pM tid %d ssn %d\n",
+ sta->addr, tid, rx_ba_ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
+ "RX AGG STOP: MAC %pM tid %d\n",
+ sta->addr, tid);
+ break;
+ default:
+ break;
+ }
+}
+
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -806,6 +887,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
break;
}
+
+ if (!ret) {
+ u16 rx_ba_ssn = 0;
+
+ if (action == IEEE80211_AMPDU_RX_START)
+ rx_ba_ssn = *ssn;
+
+ iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
+ rx_ba_ssn, action);
+ }
mutex_unlock(&mvm->mutex);
/*
@@ -1227,22 +1318,23 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ mvm->scan_status = 0;
mvm->ps_disabled = false;
mvm->calibrating = false;
/* just in case one was running */
ieee80211_remain_on_channel_expired(mvm->hw);
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
- iwl_mvm_cleanup_iterator, mvm);
+ /*
+ * clean up all interfaces, even inactive ones, as some might have
+ * gone down during the HW restart
+ */
+ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
iwl_mvm_reset_phy_ctxts(mvm);
- memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
@@ -1404,7 +1496,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
* The work item could be running or queued if the
* ROC time event stops just as we get here.
*/
- cancel_work_sync(&mvm->roc_done_wk);
+ flush_work(&mvm->roc_done_wk);
iwl_trans_stop_device(mvm->trans);
@@ -1417,20 +1509,24 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
+ * But make sure to clean up interfaces that went down before or while
+ * the HW restart was requested.
*/
- clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_cleanup_iterator, mvm);
/* We shouldn't have any UIDs still set. Loop over all the UIDs to
* make sure there's nothing left there and warn if any is found.
*/
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
int i;
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
- if (WARN_ONCE(mvm->scan_uid[i],
- "UMAC scan UID %d was not cleaned\n",
- mvm->scan_uid[i]))
- mvm->scan_uid[i] = 0;
+ for (i = 0; i < mvm->max_scans; i++) {
+ if (WARN_ONCE(mvm->scan_uid_status[i],
+ "UMAC scan UID %d status was not cleaned\n",
+ i))
+ mvm->scan_uid_status[i] = 0;
}
}
@@ -1495,7 +1591,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
.pwr_restriction = cpu_to_le16(8 * tx_power),
};
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
@@ -2354,7 +2450,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
- iwl_mvm_scan_offload_stop(mvm, true);
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -2373,89 +2469,21 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
}
-static int iwl_mvm_cancel_scan_wait_notif(struct iwl_mvm *mvm,
- enum iwl_scan_status scan_type)
-{
- int ret;
- bool wait_for_handlers = false;
-
- mutex_lock(&mvm->mutex);
-
- if (mvm->scan_status != scan_type) {
- ret = 0;
- /* make sure there are no pending notifications */
- wait_for_handlers = true;
- goto out;
- }
-
- switch (scan_type) {
- case IWL_MVM_SCAN_SCHED:
- ret = iwl_mvm_scan_offload_stop(mvm, true);
- break;
- case IWL_MVM_SCAN_OS:
- ret = iwl_mvm_cancel_scan(mvm);
- break;
- case IWL_MVM_SCAN_NONE:
- default:
- WARN_ON_ONCE(1);
- ret = -EINVAL;
- break;
- }
- if (ret)
- goto out;
-
- wait_for_handlers = true;
-out:
- mutex_unlock(&mvm->mutex);
-
- /* make sure we consume the completion notification */
- if (wait_for_handlers)
- iwl_mvm_wait_for_async_handlers(mvm);
-
- return ret;
-}
static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct cfg80211_scan_request *req = &hw_req->req;
int ret;
- if (req->n_channels == 0 ||
- req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
+ if (hw_req->req.n_channels == 0 ||
+ hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -EINVAL;
- if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
- if (ret)
- return ret;
- }
-
mutex_lock(&mvm->mutex);
-
- if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
- IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
- ret = -EBUSY;
- goto out;
- }
-
- if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
- ret = -EBUSY;
- goto out;
- }
-
- iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
-
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
- ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
- else
- ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
-
- if (ret)
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-out:
+ ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
mutex_unlock(&mvm->mutex);
+
return ret;
}
@@ -2473,12 +2501,8 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
 * cancel scan before ieee80211_scan_work() could run.
* To handle that, simply return if the scan is not running.
*/
- /* FIXME: for now, we ignore this race for UMAC scans, since
- * they don't set the scan_status.
- */
- if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
- (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
- iwl_mvm_cancel_scan(mvm);
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
mutex_unlock(&mvm->mutex);
}
@@ -2794,35 +2818,17 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
struct ieee80211_scan_ies *ies)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
- if (ret)
- return ret;
- }
+ int ret;
mutex_lock(&mvm->mutex);
- if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
- IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
- ret = -EBUSY;
- goto out;
- }
-
if (!vif->bss_conf.idle) {
ret = -EBUSY;
goto out;
}
- if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies);
- if (ret)
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
out:
mutex_unlock(&mvm->mutex);
@@ -2845,16 +2851,12 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
* could run. To handle this, simply return if the scan is
* not running.
*/
- /* FIXME: for now, we ignore this race for UMAC scans, since
- * they don't set the scan_status.
- */
- if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
- !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
mutex_unlock(&mvm->mutex);
return 0;
}
- ret = iwl_mvm_scan_offload_stop(mvm, false);
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
mutex_unlock(&mvm->mutex);
iwl_mvm_wait_for_async_handlers(mvm);
@@ -2922,8 +2924,21 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
}
+ /* During FW restart, in order to restore the state as it was,
+ * don't try to reprogram keys we previously failed for.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ key->hw_key_idx == STA_KEY_IDX_INVALID) {
+ IWL_DEBUG_MAC80211(mvm,
+ "skip invalid idx key programming during restart\n");
+ ret = 0;
+ break;
+ }
+
IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
- ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
+ test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status));
if (ret) {
IWL_WARN(mvm, "set key failed\n");
/*
@@ -3001,7 +3016,7 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
return true;
}
-#define AUX_ROC_MAX_DELAY_ON_CHANNEL 5000
+#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
@@ -3106,8 +3121,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
vif, duration);
@@ -3899,7 +3914,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
if (idx != 0)
return -ENOENT;
- if (!(mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
@@ -3946,8 +3961,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (!(mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
@@ -3977,9 +3992,9 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const struct ieee80211_event *event)
+static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
{
#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
do { \
@@ -3988,7 +4003,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
} while (0)
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
@@ -4032,6 +4046,75 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
#undef CHECK_MLME_TRIGGER
}
+static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
+ return;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+ "BAR received from %pM, tid %d, ssn %d",
+ event->u.ba.sta->addr, event->u.ba.tid,
+ event->u.ba.ssn);
+}
+
+static void
+iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
+ return;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+ "Frame from %pM timed out, tid %d",
+ event->u.ba.sta->addr, event->u.ba.tid);
+}
+
+static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ switch (event->type) {
+ case MLME_EVENT:
+ iwl_mvm_event_mlme_callback(mvm, vif, event);
+ break;
+ case BAR_RX_EVENT:
+ iwl_mvm_event_bar_rx_callback(mvm, vif, event);
+ break;
+ case BA_FRAME_TIMEOUT:
+ iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
+ break;
+ default:
+ break;
+ }
+}
+
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index cf70f681d1ac..2d4bad5fe825 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,6 +76,7 @@
#include "iwl-notif-wait.h"
#include "iwl-eeprom-parse.h"
#include "iwl-fw-file.h"
+#include "iwl-config.h"
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
@@ -275,6 +276,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_UCODE_DOWN,
IWL_MVM_REF_SCAN,
IWL_MVM_REF_ROC,
+ IWL_MVM_REF_ROC_AUX,
IWL_MVM_REF_P2P_CLIENT,
IWL_MVM_REF_AP_IBSS,
IWL_MVM_REF_USER,
@@ -445,10 +447,26 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
extern const u8 tid_to_mac80211_ac[];
+#define IWL_MVM_SCAN_STOPPING_SHIFT 8
+
enum iwl_scan_status {
- IWL_MVM_SCAN_NONE,
- IWL_MVM_SCAN_OS,
- IWL_MVM_SCAN_SCHED,
+ IWL_MVM_SCAN_REGULAR = BIT(0),
+ IWL_MVM_SCAN_SCHED = BIT(1),
+ IWL_MVM_SCAN_NETDETECT = BIT(2),
+
+ IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
+ IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
+ IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
+
+ IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR |
+ IWL_MVM_SCAN_STOPPING_REGULAR,
+ IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED |
+ IWL_MVM_SCAN_STOPPING_SCHED,
+ IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
+ IWL_MVM_SCAN_STOPPING_NETDETECT,
+
+ IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
+ IWL_MVM_SCAN_MASK = 0xff,
};
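A hedged sketch of what the switch from a single enum value to a bitmask buys: several scan types, plus their "stopping" shadows, can be tracked at once. The helpers below are hypothetical; the real stop path is iwl_mvm_scan_stop(), introduced elsewhere in this patch.

```c
/* Hypothetical helpers over the new bitmask status. */
static bool example_sched_scan_busy(struct iwl_mvm *mvm)
{
	/* active or still draining its stop notification */
	return mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK;
}

static void example_mark_regular_stopping(struct iwl_mvm *mvm)
{
	/* the STOPPING_* bits mirror the active bits, shifted by 8 */
	if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
		mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
		mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR;
	}
}
```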
/**
@@ -463,49 +481,6 @@ struct iwl_nvm_section {
const u8 *data;
};
-/*
- * Tx-backoff threshold
- * @temperature: The threshold in Celsius
- * @backoff: The tx-backoff in uSec
- */
-struct iwl_tt_tx_backoff {
- s32 temperature;
- u32 backoff;
-};
-
-#define TT_TX_BACKOFF_SIZE 6
-
-/**
- * struct iwl_tt_params - thermal throttling parameters
- * @ct_kill_entry: CT Kill entry threshold
- * @ct_kill_exit: CT Kill exit threshold
- * @ct_kill_duration: The time intervals (in uSec) in which the driver needs
- * to checks whether to exit CT Kill.
- * @dynamic_smps_entry: Dynamic SMPS entry threshold
- * @dynamic_smps_exit: Dynamic SMPS exit threshold
- * @tx_protection_entry: TX protection entry threshold
- * @tx_protection_exit: TX protection exit threshold
- * @tx_backoff: Array of thresholds for tx-backoff , in ascending order.
- * @support_ct_kill: Support CT Kill?
- * @support_dynamic_smps: Support dynamic SMPS?
- * @support_tx_protection: Support tx protection?
- * @support_tx_backoff: Support tx-backoff?
- */
-struct iwl_tt_params {
- s32 ct_kill_entry;
- s32 ct_kill_exit;
- u32 ct_kill_duration;
- s32 dynamic_smps_entry;
- s32 dynamic_smps_exit;
- s32 tx_protection_entry;
- s32 tx_protection_exit;
- struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
- bool support_ct_kill;
- bool support_dynamic_smps;
- bool support_tx_protection;
- bool support_tx_backoff;
-};
-
/**
* struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
* @ct_kill_exit: worker to exit thermal kill
@@ -520,7 +495,7 @@ struct iwl_mvm_tt_mgmt {
bool dynamic_smps;
u32 tx_backoff;
u32 min_backoff;
- const struct iwl_tt_params *params;
+ struct iwl_tt_params params;
bool throttle;
};
@@ -647,13 +622,15 @@ struct iwl_mvm {
u32 rts_threshold;
/* Scan status, cmd (pre-allocated) and auxiliary station */
- enum iwl_scan_status scan_status;
+ unsigned int scan_status;
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+ /* max number of simultaneous scans the FW supports */
+ unsigned int max_scans;
+
/* UMAC scan tracking */
- u32 scan_uid[IWL_MVM_MAX_SIMULTANEOUS_SCANS];
- u8 scan_seq_num, sched_scan_seq_num;
+ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -843,6 +820,8 @@ struct iwl_mvm {
} tdls_cs;
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
+
+ u32 ciphers[6];
};
/* Extract MVM priv from op_mode and _hw */
@@ -912,14 +891,15 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
return mvm->trans->cfg->d0i3 &&
mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
!iwlwifi_mod_params.d0i3_disable &&
- (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
{
bool nvm_lar = mvm->nvm_data->lar_enabled;
- bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+ bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
if (iwlwifi_mod_params.lar_disable)
return false;
@@ -936,24 +916,28 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
{
- return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE ||
- mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC;
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
}
static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
{
- return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
+ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG);
}
static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
{
- return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
IWL_MVM_BT_COEX_CORUNNING;
}
static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
{
- return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
IWL_MVM_BT_COEX_RRC;
}
@@ -1083,8 +1067,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
@@ -1093,8 +1075,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
@@ -1146,48 +1126,38 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
struct ieee80211_vif *disabled_vif);
/* Scanning */
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies);
int iwl_mvm_scan_size(struct iwl_mvm *mvm);
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
-int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
/* Scheduled scan */
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
- struct cfg80211_sched_scan_request *req);
-int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies);
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-
-/* Unified scan */
-int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req);
-int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies);
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type);
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
/* UMAC scan */
int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req);
-int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies);
int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 87b2a30a2308..2a6be350704a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -316,8 +316,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
lar_enabled = !iwlwifi_mod_params.lar_disable &&
- (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
regulatory, mac_override, phy_sku,
@@ -583,9 +583,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
kfree(nvm_buffer);
}
- /* load external NVM if configured */
+ /* Load an external NVM (PNVM) only if one was selected via the mod param */
if (mvm->nvm_file_name) {
- /* read External NVM file - take the default */
+ /* read the external NVM file named by the mod param */
ret = iwl_mvm_read_external_nvm(mvm);
if (ret) {
/* choose the nvm_file name according to the
@@ -792,8 +792,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
char mcc[3];
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
- tlv_lar = mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+ tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
nvm_lar = mvm->nvm_data->lar_enabled;
if (tlv_lar != nvm_lar)
IWL_INFO(mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 2ea01238754e..e4fa50075ffd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -194,7 +194,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
- if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (!mvm->trans->cfg->apmg_not_supported)
iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
@@ -238,15 +238,16 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
RX_HANDLER(SCAN_ITERATION_COMPLETE,
- iwl_mvm_rx_scan_offload_iter_complete_notif, false),
+ iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
- iwl_mvm_rx_scan_offload_complete_notif, true),
- RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
+ iwl_mvm_rx_lmac_scan_complete_notif, true),
+ RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
false),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
true),
+ RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
+ iwl_mvm_rx_umac_scan_iter_complete_notif, false),
- RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
@@ -280,17 +281,11 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BINDING_CONTEXT_CMD),
CMD(TIME_QUOTA_CMD),
CMD(NON_QOS_TX_COUNTER_CMD),
- CMD(RADIO_VERSION_NOTIFICATION),
- CMD(SCAN_REQUEST_CMD),
- CMD(SCAN_ABORT_CMD),
- CMD(SCAN_START_NOTIFICATION),
- CMD(SCAN_RESULTS_NOTIFICATION),
- CMD(SCAN_COMPLETE_NOTIFICATION),
+ CMD(DC2DC_CONFIG_CMD),
CMD(NVM_ACCESS_CMD),
CMD(PHY_CONFIGURATION_CMD),
CMD(CALIB_RES_NOTIF_PHY_DB),
CMD(SET_CALIB_DEFAULT_CMD),
- CMD(CALIBRATION_COMPLETE_NOTIFICATION),
CMD(ADD_STA_KEY),
CMD(ADD_STA),
CMD(REMOVE_STA),
@@ -359,6 +354,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
CMD(TDLS_CONFIG_CMD),
CMD(MCC_UPDATE_CMD),
+ CMD(SCAN_ITERATION_COMPLETE_UMAC),
};
#undef CMD
@@ -520,15 +516,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_tt_initialize(mvm, min_backoff);
- /* set the nvm_file_name according to priority */
- if (iwlwifi_mod_params.nvm_file) {
+
+ if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
- } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
- if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)
- mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step;
- else
- mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step;
- }
+ else
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "working without external nvm file\n");
if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
"not allowing power-up and not having nvm_file\n"))
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 33cd68ae7bf9..daff1d0a8e4a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -138,7 +138,7 @@ struct rs_tx_column;
typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col);
struct rs_tx_column {
@@ -150,14 +150,14 @@ struct rs_tx_column {
};
static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
}
static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
struct iwl_mvm_sta *mvmsta;
@@ -187,7 +187,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
if (!sta->ht_cap.ht_supported)
@@ -197,10 +197,9 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
- struct rs_rate *rate = &tbl->rate;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
@@ -1128,8 +1127,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
- bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
- IWL_UCODE_TLV_API_LQ_SS_PARAMS;
+ bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS);
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1659,7 +1658,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
allow_func = next_col->checks[j];
- if (allow_func && !allow_func(mvm, sta, tbl, next_col))
+ if (allow_func && !allow_func(mvm, sta, &tbl->rate,
+ next_col))
break;
}
@@ -2136,7 +2136,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
}
/* current tx rate */
- index = lq_sta->last_txrate_idx;
+ index = rate->index;
/* rates available for this association, and for modulation mode */
rate_mask = rs_get_supported_rates(lq_sta, rate);
@@ -2184,14 +2184,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
* or search for a new one? */
rs_stay_in_table(lq_sta, false);
- goto out;
- }
- /* Else we have enough samples; calculate estimate of
- * actual average throughput */
- if (window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128)) {
- window->average_tpt = ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128);
+ return;
}
/* If we are searching for better modulation mode, check success. */
@@ -2403,9 +2396,6 @@ lq_update:
rs_set_stay_in_table(mvm, 0, lq_sta);
}
}
-
-out:
- lq_sta->last_txrate_idx = index;
}
struct rs_init_rate_info {
@@ -2548,7 +2538,6 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
rate = &tbl->rate;
rs_get_initial_rate(mvm, lq_sta, band, rate);
- lq_sta->last_txrate_idx = rate->index;
WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
if (rate->ant == ANT_A)
@@ -2725,7 +2714,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
(vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
lq_sta->stbc_capable = true;
- if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
lq_sta->bfer_capable = true;
@@ -3009,7 +2998,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
/* TODO: remove old API when min FW API hits 14 */
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
rs_stbc_allow(mvm, sta, lq_sta))
rate.stbc = true;
@@ -3223,12 +3212,9 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
- if (num_of_ant(initial_rate->ant) == 1)
- lq_cmd->single_stream_ant_msk = initial_rate->ant;
-
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index e4aa9346a231..2a3da314305a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -322,8 +322,6 @@ struct iwl_lq_sta {
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
u8 tx_agg_tid_en;
- /* used to be in sta_info */
- int last_txrate_idx;
/* last tx rate_n_flags */
u32 last_rate_n_flags;
/* packets destined for this STA are aggregated */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index d6314ddf57b5..8f1d93b7a13a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -570,7 +570,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
};
u32 temperature;
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STATS_V10)) {
struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
if (iwl_rx_packet_payload_len(pkt) != v10_len)
@@ -610,7 +610,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
/* Only handle rx statistics temperature changes if async temp
* notifications are not supported
*/
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM))
iwl_mvm_tt_temp_changed(mvm, temperature);
ieee80211_iterate_active_interfaces(mvm->hw,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 1075a213bd6a..5de144968723 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -67,11 +67,8 @@
#include <net/mac80211.h>
#include "mvm.h"
-#include "iwl-eeprom-parse.h"
#include "fw-api-scan.h"
-#define IWL_PLCP_QUIET_THRESH 1
-#define IWL_ACTIVE_QUIET_TIME 10
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -79,23 +76,31 @@ struct iwl_mvm_scan_params {
u32 max_out_time;
u32 suspend_time;
bool passive_fragmented;
+ u32 n_channels;
+ u16 delay;
+ int n_ssids;
+ struct cfg80211_ssid *ssids;
+ struct ieee80211_channel **channels;
+ u16 interval; /* interval between scans (in secs) */
+ u32 flags;
+ u8 *mac_addr;
+ u8 *mac_addr_mask;
+ bool no_cck;
+ bool pass_all;
+ int n_match_sets;
+ struct iwl_scan_probe_req preq;
+ struct cfg80211_match_set *match_sets;
struct _dwell {
u16 passive;
u16 active;
u16 fragmented;
} dwell[IEEE80211_NUM_BANDS];
+ struct {
+ u8 iterations;
+ u8 full_scan_mul; /* not used for UMAC */
+ } schedule[2];
};
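To make the consolidation concrete, a sketch of a regular-scan path filling this struct from a cfg80211 request. Field names match the struct above; everything else (the pass_all choice, the single full iteration, a pre-zeroed *params) is an assumption for illustration.

```c
/* Hypothetical translation of a cfg80211 request into the unified
 * parameter block; the caller is assumed to have zeroed *params.
 */
static void example_fill_params(struct iwl_mvm_scan_params *params,
				struct cfg80211_scan_request *req)
{
	params->n_ssids = req->n_ssids;
	params->ssids = req->ssids;
	params->n_channels = req->n_channels;
	params->channels = req->channels;
	params->flags = req->flags;
	params->no_cck = req->no_cck;
	params->pass_all = true;          /* report every result */
	params->delay = 0;

	/* a single immediate full iteration; entry 1 stays zeroed */
	params->schedule[0].iterations = 1;
	params->schedule[0].full_scan_mul = 0;
}
```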
-enum iwl_umac_scan_uid_type {
- IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0),
- IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1),
- IWL_UMAC_SCAN_UID_ALL = IWL_UMAC_SCAN_UID_REG_SCAN |
- IWL_UMAC_SCAN_UID_SCHED_SCAN,
-};
-
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type, bool notify);
-
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
@@ -143,28 +148,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
}
/*
- * We insert the SSIDs in an inverted order, because the FW will
- * invert it back. The most prioritized SSID, which is first in the
- * request list, is not copied here, but inserted directly to the probe
- * request.
- */
-static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
- struct cfg80211_ssid *ssids,
- int n_ssids, int first)
-{
- int fw_idx, req_idx;
-
- for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
- req_idx--, fw_idx++) {
- cmd_ssid[fw_idx].id = WLAN_EID_SSID;
- cmd_ssid[fw_idx].len = ssids[req_idx].ssid_len;
- memcpy(cmd_ssid[fw_idx].ssid,
- ssids[req_idx].ssid,
- ssids[req_idx].ssid_len);
- }
-}
-
-/*
* If req->n_ssids > 0, it means we should do an active scan.
* In case of active scan w/o directed scan, we receive a zero-length SSID
* just to notify that this scan is active and not passive.
@@ -177,7 +160,7 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band, int n_ssids)
{
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
return 10;
if (band == IEEE80211_BAND_2GHZ)
return 20 + 3 * (n_ssids + 1);
@@ -187,7 +170,7 @@ static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band)
{
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
return 110;
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
}
@@ -203,10 +186,9 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
*global_cnt += 1;
}
-static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int n_ssids, u32 flags,
- struct iwl_mvm_scan_params *params)
+static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params)
{
int global_cnt = 0;
enum ieee80211_band band;
@@ -216,7 +198,6 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_cnt);
-
if (!global_cnt)
goto not_bound;
@@ -224,8 +205,9 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
params->max_out_time = 120;
if (iwl_mvm_low_latency(mvm)) {
- if (mvm->fw->ucode_capa.api[0] &
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+
params->suspend_time = 105;
/*
* If there is more than one active interface make
@@ -239,8 +221,9 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
}
}
- if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+ if (frag_passive_dwell &&
+ fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
/*
* P2P device scan should not be fragmented to avoid negative
* impact on P2P device discovery. Configure max_out_time to be
@@ -257,7 +240,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
}
}
- if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+ if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+ (params->max_out_time > 200))
params->max_out_time = 200;
not_bound:
@@ -268,20 +252,34 @@ not_bound:
params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
band);
- params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
- n_ssids);
+ params->dwell[band].active =
+ iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
}
+
+ IWL_DEBUG_SCAN(mvm,
+ "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
+ params->max_out_time, params->suspend_time,
+ params->passive_fragmented);
+ IWL_DEBUG_SCAN(mvm,
+ "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
+ params->dwell[IEEE80211_BAND_2GHZ].passive,
+ params->dwell[IEEE80211_BAND_2GHZ].active,
+ params->dwell[IEEE80211_BAND_2GHZ].fragmented);
+ IWL_DEBUG_SCAN(mvm,
+ "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
+ params->dwell[IEEE80211_BAND_5GHZ].passive,
+ params->dwell[IEEE80211_BAND_5GHZ].active,
+ params->dwell[IEEE80211_BAND_5GHZ].fragmented);
}
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
/* require rrm scan whenever the fw supports it */
- return mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
}
-static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
- bool is_sched_scan)
+static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
{
int max_probe_len;
@@ -297,9 +295,9 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
return max_probe_len;
}
-int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
{
- int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
+ int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
/* TODO: [BUG] This function should return the maximum allowed size of
* scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
@@ -314,22 +312,41 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
return max_ie_len;
}
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
+ int num_res, u8 *buf, size_t buf_size)
+{
+ int i;
+ u8 *pos = buf, *end = buf + buf_size;
+
+ for (i = 0; pos < end && i < num_res; i++)
+ pos += snprintf(pos, end - pos, " %u", res[i].channel);
+
+ /* terminate the string in case the buffer was too short */
+ *(buf + buf_size - 1) = '\0';
+
+ return buf;
+}
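
The helper above leans on snprintf() semantics: the return value counts the bytes the call wanted to write, so after truncation pos can land past end; the loop condition then stops, and the final byte is forced to '\0'. A standalone sketch, with plain integers standing in for the notification results:

#include <stdio.h>

static char *dump_channels(const unsigned short *chan, int n,
			   char *buf, size_t buf_size)
{
	char *pos = buf, *end = buf + buf_size;
	int i;

	for (i = 0; pos < end && i < n; i++)
		pos += snprintf(pos, end - pos, " %u", chan[i]);
	buf[buf_size - 1] = '\0';	/* safe even after truncation */
	return buf;
}

int main(void)
{
	unsigned short ch[] = { 1, 6, 11, 36, 40 };
	char buf[16];	/* deliberately small to force truncation */

	printf("channels:%s\n", dump_channels(ch, 5, buf, sizeof(buf)));
	return 0;
}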
+
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
+ u8 buf[256];
IWL_DEBUG_SCAN(mvm,
- "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
- notif->status, notif->scanned_channels);
+ "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
+ notif->status, notif->scanned_channels,
+ iwl_mvm_dump_channel_list(notif->results,
+ notif->scanned_channels, buf,
+ sizeof(buf)));
return 0;
}
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
ieee80211_sched_scan_results(mvm->hw);
@@ -337,41 +354,78 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
return 0;
}
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_periodic_scan_complete *scan_notif;
+ switch (status) {
+ case IWL_SCAN_EBS_SUCCESS:
+ return "successful";
+ case IWL_SCAN_EBS_INACTIVE:
+ return "inactive";
+ case IWL_SCAN_EBS_FAILED:
+ case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+ default:
+ return "failed";
+ }
+}
- scan_notif = (void *)pkt->data;
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
+ bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
/* scan status must be locked for proper checking */
lockdep_assert_held(&mvm->mutex);
- IWL_DEBUG_SCAN(mvm,
- "%s completed, status %s, EBS status %s\n",
- mvm->scan_status == IWL_MVM_SCAN_SCHED ?
- "Scheduled scan" : "Scan",
- scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- "completed" : "aborted",
- scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
- "success" : "failed");
+ /* We first check if we were stopping a scan, in which case we
+ * just clear the stopping flag. Then we check if it was a
+ * firmware initiated stop, in which case we need to inform
+ * mac80211.
+ * Note that we can have a stopping and a running scan
+ * simultaneously, but we can't have two different types of
+ * scans stopping or running at the same time (since LMAC
+ * doesn't support it).
+ */
+
+ if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
+ WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
+ IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
- /* only call mac80211 completion if the stop was initiated by FW */
- if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
+ WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
ieee80211_sched_scan_stopped(mvm->hw);
- } else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+ IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
ieee80211_scan_completed(mvm->hw,
scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
}
- if (scan_notif->ebs_status)
- mvm->last_ebs_successful = false;
+ mvm->last_ebs_successful =
+ scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
+ scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
return 0;
}
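
The handler above treats scan_status as a small state machine: a stopping bit means the driver asked for the stop and mac80211 must not be told again, while a bare running bit means the firmware ended the scan on its own. A toy model of the same dispatch (the bit values and shift are assumptions for the demo; only the relationships matter):

#include <stdio.h>

#define SCAN_REGULAR		(1 << 0)
#define SCAN_SCHED		(1 << 1)
#define STOPPING_SHIFT		8
#define SCAN_STOPPING_REGULAR	(SCAN_REGULAR << STOPPING_SHIFT)
#define SCAN_STOPPING_SCHED	(SCAN_SCHED << STOPPING_SHIFT)

static void complete_notif(unsigned int *status)
{
	if (*status & SCAN_STOPPING_SCHED)
		*status &= ~SCAN_STOPPING_SCHED;    /* stop we requested */
	else if (*status & SCAN_STOPPING_REGULAR)
		*status &= ~SCAN_STOPPING_REGULAR;
	else if (*status & SCAN_SCHED)
		*status &= ~SCAN_SCHED;     /* FW-initiated: notify mac80211 */
	else if (*status & SCAN_REGULAR)
		*status &= ~SCAN_REGULAR;
}

int main(void)
{
	unsigned int status = SCAN_SCHED | SCAN_STOPPING_REGULAR;

	complete_notif(&status);	/* clears only the stopping bit */
	printf("status after notif: 0x%x\n", status);	/* 0x2 == SCAN_SCHED */
	return 0;
}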
@@ -390,9 +444,12 @@ static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
return -1;
}
-static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
- struct iwl_ssid_ie *direct_scan,
- u32 *ssid_bitmap, bool basic_ssid)
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ */
+static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
+ struct iwl_ssid_ie *ssids,
+ u32 *ssid_bitmap)
{
int i, j;
int index;
@@ -402,39 +459,41 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
* iwl_config_sched_scan_profiles() uses the order of these ssids to
* config match list.
*/
- for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+ for (i = 0, j = params->n_match_sets - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
/* skip empty SSID matchsets */
- if (!req->match_sets[i].ssid.ssid_len)
+ if (!params->match_sets[j].ssid.ssid_len)
continue;
- direct_scan[i].id = WLAN_EID_SSID;
- direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
- memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
- direct_scan[i].len);
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->match_sets[j].ssid.ssid_len;
+ memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+ ssids[i].len);
}
/* add SSIDs from scan SSID list */
*ssid_bitmap = 0;
- for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
- index = iwl_ssid_exist(req->ssids[j].ssid,
- req->ssids[j].ssid_len,
- direct_scan);
+ for (j = params->n_ssids - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ index = iwl_ssid_exist(params->ssids[j].ssid,
+ params->ssids[j].ssid_len,
+ ssids);
if (index < 0) {
- if (!req->ssids[j].ssid_len && basic_ssid)
- continue;
- direct_scan[i].id = WLAN_EID_SSID;
- direct_scan[i].len = req->ssids[j].ssid_len;
- memcpy(direct_scan[i].ssid, req->ssids[j].ssid,
- direct_scan[i].len);
- *ssid_bitmap |= BIT(i + 1);
- i++;
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->ssids[j].ssid_len;
+ memcpy(ssids[i].ssid, params->ssids[j].ssid,
+ ssids[i].len);
+ *ssid_bitmap |= BIT(i);
} else {
- *ssid_bitmap |= BIT(index + 1);
+ *ssid_bitmap |= BIT(index);
}
}
}
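
A toy model of the inverted fill: slots are written in reverse request order because the firmware inverts them back, and each slot index becomes that SSID's bitmap bit (the LMAC caller later shifts the whole bitmap left by one, since that API numbers SSIDs 1-20, as noted near the schedule setup below). Fixed-size strings stand in for struct iwl_ssid_ie:

#include <stdio.h>
#include <string.h>

#define MAX_SLOTS 4

int main(void)
{
	const char *req[] = { "alpha", "beta", "gamma" };	/* request order */
	char slots[MAX_SLOTS][16] = { "" };
	unsigned int bitmap = 0;
	int i = 0, j;

	for (j = 2; j >= 0 && i < MAX_SLOTS; i++, j--) {
		strcpy(slots[i], req[j]);
		bitmap |= 1u << i;	/* bit == slot index */
	}
	for (i = 0; i < MAX_SLOTS; i++)
		printf("slot %d: '%s'\n", i, slots[i]);
	printf("bitmap: 0x%x\n", bitmap);	/* 0x7 */
	return 0;
}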
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
- struct cfg80211_sched_scan_request *req)
+static int
+iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
{
struct iwl_scan_offload_profile *profile;
struct iwl_scan_offload_profile_cfg *profile_cfg;
@@ -515,30 +574,7 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
return true;
}
-int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies)
-{
- int ret;
-
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
- ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
- if (ret)
- return ret;
- ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
- } else {
- mvm->scan_status = IWL_MVM_SCAN_SCHED;
- ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
- if (ret)
- return ret;
- ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
- }
-
- return ret;
-}
-
-static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
+static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
{
int ret;
struct iwl_host_cmd cmd = {
@@ -546,12 +582,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
};
u32 status;
- /* Exit instantly with error when device is not ready
- * to receive scan abort command or it does not perform
- * scheduled scan currently */
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- return -EIO;
-
ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -571,69 +601,9 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
return ret;
}
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
-{
- int ret;
- struct iwl_notification_wait wait_scan_done;
- static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
- bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
- return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
- notify);
-
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- return 0;
-
- if (iwl_mvm_is_radio_killed(mvm)) {
- ret = 0;
- goto out;
- }
-
- iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
- scan_done_notif,
- ARRAY_SIZE(scan_done_notif),
- NULL, NULL);
-
- ret = iwl_mvm_send_scan_offload_abort(mvm);
- if (ret) {
- IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
- sched ? "offloaded " : "", ret);
- iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
- goto out;
- }
-
- IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
- sched ? "offloaded " : "");
-
- ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-out:
- /*
- * Clear the scan status so the next scan requests will succeed. This
- * also ensures the Rx handler doesn't do anything, as the scan was
- * stopped from above. Since the rx handler won't do anything now,
- * we have to release the scan reference here.
- */
- if (mvm->scan_status == IWL_MVM_SCAN_OS)
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-
- mvm->scan_status = IWL_MVM_SCAN_NONE;
-
- if (notify) {
- if (sched)
- ieee80211_sched_scan_stopped(mvm->hw);
- else
- ieee80211_scan_completed(mvm->hw, true);
- }
-
- return ret;
-}
-
-static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
- struct iwl_scan_req_tx_cmd *tx_cmd,
- bool no_cck)
+static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
+ struct iwl_scan_req_tx_cmd *tx_cmd,
+ bool no_cck)
{
tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
TX_CMD_FLG_BT_DIS);
@@ -654,7 +624,7 @@ static void
iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
int n_channels, u32 ssid_bitmap,
- struct iwl_scan_req_unified_lmac *cmd)
+ struct iwl_scan_req_lmac *cmd)
{
struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
int i;
@@ -707,13 +677,14 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
}
static void
-iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_scan_ies *ies,
- struct iwl_scan_probe_req *preq,
- const u8 *mac_addr, const u8 *mac_addr_mask)
+iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_scan_ies *ies,
+ struct iwl_mvm_scan_params *params)
{
- struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
+ struct ieee80211_mgmt *frame = (void *)params->preq.buf;
u8 *pos, *newpos;
+ const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ params->mac_addr : NULL;
/*
* Unfortunately, right now the offload scan doesn't support randomising
@@ -722,7 +693,8 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* random, only when it's restarted, but at least that helps a bit.
*/
if (mac_addr)
- get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
+ get_random_mask_addr(frame->sa, mac_addr,
+ params->mac_addr_mask);
else
memcpy(frame->sa, vif->addr, ETH_ALEN);
@@ -735,245 +707,167 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*pos++ = WLAN_EID_SSID;
*pos++ = 0;
- preq->mac_header.offset = 0;
- preq->mac_header.len = cpu_to_le16(24 + 2);
+ params->preq.mac_header.offset = 0;
+ params->preq.mac_header.len = cpu_to_le16(24 + 2);
/* Insert ds parameter set element on 2.4 GHz band */
newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
ies->ies[IEEE80211_BAND_2GHZ],
ies->len[IEEE80211_BAND_2GHZ],
pos);
- preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
- preq->band_data[0].len = cpu_to_le16(newpos - pos);
+ params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
pos = newpos;
memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
ies->len[IEEE80211_BAND_5GHZ]);
- preq->band_data[1].offset = cpu_to_le16(pos - preq->buf);
- preq->band_data[1].len = cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
+ params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[1].len =
+ cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
pos += ies->len[IEEE80211_BAND_5GHZ];
memcpy(pos, ies->common_ies, ies->common_ie_len);
- preq->common_data.offset = cpu_to_le16(pos - preq->buf);
- preq->common_data.len = cpu_to_le16(ies->common_ie_len);
+ params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
}
-static void
-iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
- struct iwl_scan_req_unified_lmac *cmd,
- struct iwl_mvm_scan_params *params)
+static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
+ enum iwl_scan_priority_ext prio)
+{
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
+ return cpu_to_le32(prio);
+
+ if (prio <= IWL_SCAN_PRIORITY_EXT_2)
+ return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+
+ if (prio <= IWL_SCAN_PRIORITY_EXT_4)
+ return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
+
+ return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+}
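
The priority shim above passes the extended value through on new firmware and buckets it into the three legacy levels otherwise. The same mapping, stripped of the endianness handling (enum values here are assumptions):

#include <stdio.h>

enum { PRIO_LOW, PRIO_MEDIUM, PRIO_HIGH };

static int scan_priority(int has_ext_prio_api, int ext_prio)
{
	if (has_ext_prio_api)
		return ext_prio;	/* 0..7 passed through */
	if (ext_prio <= 2)
		return PRIO_LOW;
	if (ext_prio <= 4)
		return PRIO_MEDIUM;
	return PRIO_HIGH;
}

int main(void)
{
	printf("ext 6 on legacy fw -> %d (high)\n", scan_priority(0, 6));
	printf("ext 2 on legacy fw -> %d (low)\n", scan_priority(0, 2));
	return 0;
}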
+
+static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
+ struct iwl_scan_req_lmac *cmd,
+ struct iwl_mvm_scan_params *params)
{
- memset(cmd, 0, ksize(cmd));
cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
if (params->passive_fragmented)
cmd->fragmented_dwell =
params->dwell[IEEE80211_BAND_2GHZ].fragmented;
- cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time);
- cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
- cmd->iter_num = cpu_to_le32(1);
-
- if (iwl_mvm_rrm_scan_needed(mvm))
- cmd->scan_flags |=
- cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
+ cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
}
-int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req)
+static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+ struct ieee80211_scan_ies *ies,
+ int n_channels)
{
- struct iwl_host_cmd hcmd = {
- .id = SCAN_OFFLOAD_REQUEST_CMD,
- .len = { sizeof(struct iwl_scan_req_unified_lmac) +
- sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels +
- sizeof(struct iwl_scan_probe_req), },
- .data = { mvm->scan_cmd, },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
- struct iwl_scan_probe_req *preq;
- struct iwl_mvm_scan_params params = {};
- u32 flags;
- u32 ssid_bitmap = 0;
- int ret, i;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* we should have failed registration if scan_cmd was NULL */
- if (WARN_ON(mvm->scan_cmd == NULL))
- return -ENOMEM;
-
- if (req->req.n_ssids > PROBE_OPTION_MAX ||
- req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] +
- req->ies.len[NL80211_BAND_5GHZ] >
- iwl_mvm_max_scan_ie_fw_cmd_room(mvm, false) ||
- req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
- return -ENOBUFS;
+ return ((n_ssids <= PROBE_OPTION_MAX) &&
(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
+ (ies->common_ie_len +
+ ies->len[NL80211_BAND_2GHZ] +
+ ies->len[NL80211_BAND_5GHZ] <=
+ iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
+}
- mvm->scan_status = IWL_MVM_SCAN_OS;
+static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int n_iterations)
+{
+ const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
- iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
- &params);
+ /* We can only use EBS if:
+ * 1. the feature is supported;
+ * 2. the last EBS was successful;
+ * 3. for a single scan, the single-scan EBS API is supported;
+ * 4. it's not a p2p find operation.
+ */
+ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
+ mvm->last_ebs_successful &&
+ (n_iterations > 1 ||
+ fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE);
+}
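
The predicate is just the conjunction of the four listed conditions; restated standalone with placeholder flags:

#include <stdbool.h>
#include <stdio.h>

static bool use_ebs(bool ebs_supported, bool last_ebs_ok,
		    int n_iterations, bool single_scan_ebs_api,
		    bool is_p2p_device)
{
	return ebs_supported &&
	       last_ebs_ok &&
	       (n_iterations > 1 || single_scan_ebs_api) &&
	       !is_p2p_device;
}

int main(void)
{
	/* a single scan on firmware without the single-scan EBS API
	 * keeps EBS off even though EBS itself is supported */
	printf("%d\n", use_ebs(true, true, 1, false, false));
	return 0;
}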
- iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
+static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
+{
+ return params->schedule[0].iterations + params->schedule[1].iterations;
+}
- cmd->n_channels = (u8)req->req.n_channels;
+static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params)
+{
+ int flags = 0;
- flags = IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ if (params->n_ssids == 0)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
- if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0)
+ if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
- if (params.passive_fragmented)
+ if (params->passive_fragmented)
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
- if (req->req.n_ssids == 0)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
-
- cmd->scan_flags |= cpu_to_le32(flags);
-
- cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
- cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
- MAC_FILTER_IN_BEACON);
- iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck);
- iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids,
- req->req.n_ssids, 0);
-
- cmd->schedule[0].delay = 0;
- cmd->schedule[0].iterations = 1;
- cmd->schedule[0].full_scan_mul = 0;
- cmd->schedule[1].delay = 0;
- cmd->schedule[1].iterations = 0;
- cmd->schedule[1].full_scan_mul = 0;
-
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
- mvm->last_ebs_successful) {
- cmd->channel_opt[0].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- cmd->channel_opt[0].non_ebs_ratio =
- cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
- cmd->channel_opt[1].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- cmd->channel_opt[1].non_ebs_ratio =
- cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
- }
-
- for (i = 1; i <= req->req.n_ssids; i++)
- ssid_bitmap |= BIT(i);
-
- iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels,
- req->req.n_channels, ssid_bitmap,
- cmd);
+ if (iwl_mvm_rrm_scan_needed(mvm))
+ flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
- preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels);
+ if (params->pass_all)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ else
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
- iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
- req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
- req->req.mac_addr : NULL,
- req->req.mac_addr_mask);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->scan_iter_notif_enabled)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+#endif
- ret = iwl_mvm_send_cmd(mvm, &hcmd);
- if (!ret) {
- IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
- } else {
- /*
- * If the scan failed, it usually means that the FW was unable
- * to allocate the time events. Warn on it, but maybe we
- * should try to send the command again with different params.
- */
- IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- ret = -EIO;
- }
- return ret;
+ return flags;
}
-int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies)
+static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params)
{
- struct iwl_host_cmd hcmd = {
- .id = SCAN_OFFLOAD_REQUEST_CMD,
- .len = { sizeof(struct iwl_scan_req_unified_lmac) +
- sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels +
- sizeof(struct iwl_scan_probe_req), },
- .data = { mvm->scan_cmd, },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
- struct iwl_scan_probe_req *preq;
- struct iwl_mvm_scan_params params = {};
- int ret;
- u32 flags = 0, ssid_bitmap = 0;
+ struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
+ struct iwl_scan_probe_req *preq =
+ (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels);
+ u32 ssid_bitmap = 0;
+ int n_iterations = iwl_mvm_scan_total_iterations(params);
lockdep_assert_held(&mvm->mutex);
- /* we should have failed registration if scan_cmd was NULL */
- if (WARN_ON(mvm->scan_cmd == NULL))
- return -ENOMEM;
-
- if (req->n_ssids > PROBE_OPTION_MAX ||
- ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
- ies->len[NL80211_BAND_5GHZ] >
- iwl_mvm_max_scan_ie_fw_cmd_room(mvm, true) ||
- req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
- return -ENOBUFS;
-
- iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
-
- iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
-
- cmd->n_channels = (u8)req->n_channels;
-
- cmd->delay = cpu_to_le32(req->delay);
-
- if (iwl_mvm_scan_pass_all(mvm, req))
- flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
- else
- flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
+ memset(cmd, 0, ksize(cmd));
- if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+ iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
- if (params.passive_fragmented)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
-
- if (req->n_ssids == 0)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
+ cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
+ cmd->iter_num = cpu_to_le32(1);
+ cmd->n_channels = (u8)params->n_channels;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvm->scan_iter_notif_enabled)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
-#endif
+ cmd->delay = cpu_to_le32(params->delay);
- cmd->scan_flags |= cpu_to_le32(flags);
+ cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params));
- cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
+ cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
- iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, false);
- iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false);
+ iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
+ iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
- cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
- cmd->schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
- cmd->schedule[0].full_scan_mul = 1;
+ /* this API uses bits 1-20 instead of 0-19 */
+ ssid_bitmap <<= 1;
- cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
- cmd->schedule[1].iterations = 0xff;
- cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+ cmd->schedule[0].delay = cpu_to_le16(params->interval);
+ cmd->schedule[0].iterations = params->schedule[0].iterations;
+ cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+ cmd->schedule[1].delay = cpu_to_le16(params->interval);
+ cmd->schedule[1].iterations = params->schedule[1].iterations;
+ cmd->schedule[1].full_scan_mul = params->schedule[1].full_scan_mul;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
- mvm->last_ebs_successful) {
+ if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
cmd->channel_opt[0].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -988,61 +882,14 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
}
- iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
- ssid_bitmap, cmd);
-
- preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels);
+ iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, ssid_bitmap, cmd);
- iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
- req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
- req->mac_addr : NULL,
- req->mac_addr_mask);
+ *preq = params->preq;
- ret = iwl_mvm_send_cmd(mvm, &hcmd);
- if (!ret) {
- IWL_DEBUG_SCAN(mvm,
- "Sched scan request was sent successfully\n");
- } else {
- /*
- * If the scan failed, it usually means that the FW was unable
- * to allocate the time events. Warn on it, but maybe we
- * should try to send the command again with different params.
- */
- IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- ret = -EIO;
- }
- return ret;
-}
-
-
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
-{
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
- return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
- true);
-
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- return 0;
-
- if (iwl_mvm_is_radio_killed(mvm)) {
- ieee80211_scan_completed(mvm->hw, true);
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- return 0;
- }
-
- return iwl_mvm_scan_offload_stop(mvm, true);
+ return 0;
}
-/* UMAC scan API */
-
-struct iwl_umac_scan_done {
- struct iwl_mvm *mvm;
- enum iwl_umac_scan_uid_type type;
-};
-
static int rate_to_scan_rate_flag(unsigned int rate)
{
static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
@@ -1151,79 +998,21 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return ret;
}
-static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
-{
- int i;
-
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
- if (mvm->scan_uid[i] == uid)
- return i;
-
- return i;
-}
-
-static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
-{
- return iwl_mvm_find_scan_uid(mvm, 0);
-}
-
-static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type)
-{
- int i;
-
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
- if (mvm->scan_uid[i] & type)
- return true;
-
- return false;
-}
-
-static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type)
+static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
{
int i;
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
- if (mvm->scan_uid[i] & type)
+ for (i = 0; i < mvm->max_scans; i++)
+ if (mvm->scan_uid_status[i] == status)
return i;
- return i;
+ return -ENOENT;
}
-static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type)
-{
- u32 uid;
-
- /* make sure exactly one bit is on in scan type */
- WARN_ON(hweight8(type) != 1);
-
- /*
- * Make sure scan uids are unique. If one scan lasts long time while
- * others are completing frequently, the seq number will wrap up and
- * we may have more than one scan with the same uid.
- */
- do {
- uid = type | (mvm->scan_seq_num <<
- IWL_UMAC_SCAN_UID_SEQ_OFFSET);
- mvm->scan_seq_num++;
- } while (iwl_mvm_find_scan_uid(mvm, uid) <
- IWL_MVM_MAX_SIMULTANEOUS_SCANS);
-
- IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
-
- return uid;
-}
-
-static void
-iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
+static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
- memset(cmd, 0, ksize(cmd));
- cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
- sizeof(struct iwl_mvm_umac_cmd_hdr));
cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
if (params->passive_fragmented)
@@ -1231,7 +1020,15 @@ iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
params->dwell[IEEE80211_BAND_2GHZ].fragmented;
cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time);
- cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+ cmd->scan_priority =
+ iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+
+ if (iwl_mvm_scan_total_iterations(params) == 0)
+ cmd->ooc_priority =
+ iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority =
+ iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
}
static void
@@ -1251,230 +1048,326 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
}
}
-static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
- struct cfg80211_ssid *ssids,
- int fragmented)
+static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params)
{
int flags = 0;
- if (n_ssids == 0)
+ if (params->n_ssids == 0)
flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
- if (n_ssids == 1 && ssids[0].ssid_len != 0)
+ if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
- if (fragmented)
+ if (params->passive_fragmented)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+
+ if (iwl_mvm_scan_total_iterations(params) > 1)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->scan_iter_notif_enabled)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+#endif
return flags;
}
-int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req)
+static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params,
+ int type)
{
- struct iwl_host_cmd hcmd = {
- .id = SCAN_REQ_UMAC,
- .len = { iwl_mvm_scan_size(mvm), },
- .data = { mvm->scan_cmd, },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
- struct iwl_mvm_scan_params params = {};
- u32 uid, flags;
+ int uid;
u32 ssid_bitmap = 0;
- int ret, i, uid_idx;
+ int n_iterations = iwl_mvm_scan_total_iterations(params);
lockdep_assert_held(&mvm->mutex);
- uid_idx = iwl_mvm_find_free_scan_uid(mvm);
- if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
- return -EBUSY;
+ uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+ if (uid < 0)
+ return uid;
- /* we should have failed registration if scan_cmd was NULL */
- if (WARN_ON(mvm->scan_cmd == NULL))
- return -ENOMEM;
-
- if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
- req->ies.common_ie_len +
- req->ies.len[NL80211_BAND_2GHZ] +
- req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
- SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
- mvm->fw->ucode_capa.n_scan_channels))
- return -ENOBUFS;
+ memset(cmd, 0, ksize(cmd));
+ cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
+ sizeof(struct iwl_mvm_umac_cmd_hdr));
- iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
- &params);
+ iwl_mvm_scan_umac_dwell(mvm, cmd, params);
- iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
+ mvm->scan_uid_status[uid] = type;
- uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
- mvm->scan_uid[uid_idx] = uid;
cmd->uid = cpu_to_le32(uid);
+ cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
-
- flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
- req->req.ssids,
- params.passive_fragmented);
-
- flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
-
- cmd->general_flags = cpu_to_le32(flags);
-
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
- mvm->last_ebs_successful)
+ if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- cmd->n_channels = req->req.n_channels;
+ cmd->n_channels = params->n_channels;
+
+ iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
- for (i = 0; i < req->req.n_ssids; i++)
- ssid_bitmap |= BIT(i);
+ iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, ssid_bitmap, cmd);
- iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
- req->req.n_channels, ssid_bitmap, cmd);
+ /* With UMAC we use only one schedule for now, so use the sum
+ * of the iterations (with a maximum of 255).
+ */
+ sec_part->schedule[0].iter_count =
+ (n_iterations > 255) ? 255 : n_iterations;
+ sec_part->schedule[0].interval = cpu_to_le16(params->interval);
- sec_part->schedule[0].iter_count = 1;
- sec_part->delay = 0;
+ sec_part->delay = cpu_to_le16(params->delay);
+ sec_part->preq = params->preq;
- iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
- req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
- req->req.mac_addr : NULL,
- req->req.mac_addr_mask);
+ return 0;
+}
- iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
- req->req.n_ssids, 0);
+static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
+{
+ return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
+}
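
iwl_mvm_num_scans() is a population count over the running-scan bits. With illustrative bit values (the GCC/Clang builtin stands in for the kernel's hweight32()):

#include <stdio.h>

#define SCAN_REGULAR	(1 << 0)
#define SCAN_SCHED	(1 << 1)
#define SCAN_MASK	(SCAN_REGULAR | SCAN_SCHED)

int main(void)
{
	unsigned int status = SCAN_REGULAR | SCAN_SCHED;

	printf("running scans: %d\n",
	       __builtin_popcount(status & SCAN_MASK));	/* 2 */
	return 0;
}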
- ret = iwl_mvm_send_cmd(mvm, &hcmd);
- if (!ret) {
- IWL_DEBUG_SCAN(mvm,
- "Scan request was sent successfully\n");
- } else {
- /*
- * If the scan failed, it usually means that the FW was unable
- * to allocate the time events. Warn on it, but maybe we
- * should try to send the command again with different params.
- */
- IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+{
+ /* This looks a bit arbitrary, but the idea is that if we run
+ * out of possible simultaneous scans and userspace is
+ * trying to run a scan type that is already running, we
+ * return -EBUSY. But if userspace wants to start a
+ * different type of scan, we stop the opposite type to make
+ * space for the new request. The reason is backwards
+ * compatibility with old wpa_supplicant that wouldn't stop a
+ * scheduled scan before starting a normal scan.
+ */
+
+ if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
+ return 0;
+
+ /* Use a switch, even though this is a bitmask, so that more
+ * than one bit set will fall through to default and we will warn.
+ */
+ switch (type) {
+ case IWL_MVM_SCAN_REGULAR:
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+ return -EBUSY;
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+ case IWL_MVM_SCAN_SCHED:
+ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+ return -EBUSY;
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+ case IWL_MVM_SCAN_NETDETECT:
+ /* No need to stop anything for net-detect since the
+ * firmware is restarted anyway. This way, any sched
+ * scans that were running will be restarted when we
+ * resume.
+ */
+ return 0;
+ default:
+ WARN_ON(1);
+ break;
}
- return ret;
+
+ return -EIO;
}
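
The policy reduces to a small decision table: with a free slot anything goes; otherwise a request duplicating a running type gets -EBUSY, a different type evicts its opposite, and net-detect stops nothing because the firmware restarts for it anyway. A sketch with illustrative constants (net-detect omitted):

#include <errno.h>
#include <stdio.h>

#define SCAN_REGULAR	(1 << 0)
#define SCAN_SCHED	(1 << 1)

static int check_running(unsigned int status, int num_scans,
			 int max_scans, int type)
{
	if (num_scans < max_scans)
		return 0;
	if (status & type)
		return -EBUSY;
	return 1;	/* caller stops the opposite scan type */
}

int main(void)
{
	/* one slot, sched scan running, regular scan requested:
	 * returns 1, i.e. stop the sched scan to make room */
	printf("%d\n", check_running(SCAN_SCHED, 1, 1, SCAN_REGULAR));
	return 0;
}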
-int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies)
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies)
{
-
struct iwl_host_cmd hcmd = {
- .id = SCAN_REQ_UMAC,
.len = { iwl_mvm_scan_size(mvm), },
.data = { mvm->scan_cmd, },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
- struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
- sizeof(struct iwl_scan_channel_cfg_umac) *
- mvm->fw->ucode_capa.n_scan_channels;
struct iwl_mvm_scan_params params = {};
- u32 uid, flags;
- u32 ssid_bitmap = 0;
- int ret, uid_idx;
+ int ret;
lockdep_assert_held(&mvm->mutex);
- uid_idx = iwl_mvm_find_free_scan_uid(mvm);
- if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
return -EBUSY;
+ }
+
+ ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
/* we should have failed registration if scan_cmd was NULL */
- if (WARN_ON(mvm->scan_cmd == NULL))
+ if (WARN_ON(!mvm->scan_cmd))
return -ENOMEM;
- if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
- ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
- ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
- SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
- mvm->fw->ucode_capa.n_scan_channels))
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
return -ENOBUFS;
- iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
- &params);
-
- iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
-
- cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
-
- uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
- mvm->scan_uid[uid_idx] = uid;
- cmd->uid = cpu_to_le32(uid);
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.delay = 0;
+ params.interval = 0;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = req->no_cck;
+ params.pass_all = true;
+ params.n_match_sets = 0;
+ params.match_sets = NULL;
+
+ params.schedule[0].iterations = 1;
+ params.schedule[0].full_scan_mul = 0;
+ params.schedule[1].iterations = 0;
+ params.schedule[1].full_scan_mul = 0;
+
+ iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+
+ iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd.id = SCAN_REQ_UMAC;
+ ret = iwl_mvm_scan_umac(mvm, vif, &params,
+ IWL_MVM_SCAN_REGULAR);
+ } else {
+ hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+ ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+ }
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+ if (ret)
+ return ret;
- flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
- params.passive_fragmented);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+ mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+ } else {
+ /* If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+ }
- flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+ if (ret)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- if (iwl_mvm_scan_pass_all(mvm, req))
- flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
- else
- flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+ return ret;
+}
- cmd->general_flags = cpu_to_le32(flags);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
+{
+ struct iwl_host_cmd hcmd = {
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mvm_scan_params params = {};
+ int ret;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
- mvm->last_ebs_successful)
- cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+ lockdep_assert_held(&mvm->mutex);
- cmd->n_channels = req->n_channels;
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
+ return -EBUSY;
+ }
- iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
- false);
+ ret = iwl_mvm_check_running_scans(mvm, type);
+ if (ret)
+ return ret;
- /* This API uses bits 0-19 instead of 1-20. */
- ssid_bitmap = ssid_bitmap >> 1;
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mvm->scan_cmd))
+ return -ENOMEM;
- iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
- ssid_bitmap, cmd);
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
+ return -ENOBUFS;
- sec_part->schedule[0].interval =
- cpu_to_le16(req->interval / MSEC_PER_SEC);
- sec_part->schedule[0].iter_count = 0xff;
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = false;
+ params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
+ params.n_match_sets = req->n_match_sets;
+ params.match_sets = req->match_sets;
+
+ params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
+ params.schedule[0].full_scan_mul = 1;
+ params.schedule[1].iterations = 0xff;
+ params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+
+ if (req->interval > U16_MAX) {
+ IWL_DEBUG_SCAN(mvm,
+ "interval value is > 16-bits, set to max possible\n");
+ params.interval = U16_MAX;
+ } else {
+ params.interval = req->interval / MSEC_PER_SEC;
+ }
+ /* In theory, LMAC scans can handle a 32-bit delay, but since
+ * waiting for over 18 hours to start the scan is a bit silly
+ * and to keep it aligned with UMAC scans (which only support
+ * 16-bit delays), trim it down to 16-bits.
+ */
if (req->delay > U16_MAX) {
IWL_DEBUG_SCAN(mvm,
"delay value is > 16-bits, set to max possible\n");
- sec_part->delay = cpu_to_le16(U16_MAX);
+ params.delay = U16_MAX;
+ } else {
+ params.delay = req->delay;
+ }
+
+ iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+
+ iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd.id = SCAN_REQ_UMAC;
+ ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
} else {
- sec_part->delay = cpu_to_le16(req->delay);
+ hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+ ret = iwl_mvm_scan_lmac(mvm, vif, &params);
}
- iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
- req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
- req->mac_addr : NULL,
- req->mac_addr_mask);
+ if (ret)
+ return ret;
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
IWL_DEBUG_SCAN(mvm,
"Sched scan request was sent successfully\n");
+ mvm->scan_status |= type;
} else {
- /*
- * If the scan failed, it usually means that the FW was unable
+ /* If the scan failed, it usually means that the FW was unable
* to allocate the time events. Warn on it, but maybe we
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
}
+
return ret;
}
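
Both clamps in the function above squeeze a 32-bit request value into the 16 bits shared by the LMAC and UMAC command layouts; generically:

#include <stdio.h>

#define U16_MAX 0xffff

static unsigned short clamp_u16(unsigned int val)
{
	return val > U16_MAX ? U16_MAX : (unsigned short)val;
}

int main(void)
{
	printf("%u\n", clamp_u16(70000));	/* 65535 */
	printf("%u\n", clamp_u16(1200));	/* 1200 */
	return 0;
}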
@@ -1485,150 +1378,124 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
- bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
- int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
+ bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
- /*
- * Scan uid may be set to zero in case of scan abort request from above.
- */
- if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
return 0;
+ /* if the scan is already stopping, we don't need to notify mac80211 */
+ if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
+ ieee80211_scan_completed(mvm->hw, aborted);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ }
+
+ mvm->scan_status &= ~mvm->scan_uid_status[uid];
+
IWL_DEBUG_SCAN(mvm,
- "Scan completed, uid %u type %s, status %s, EBS status %s\n",
- uid, sched ? "sched" : "regular",
+ "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+ uid, mvm->scan_uid_status[uid],
notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted",
- notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
- "success" : "failed");
+ iwl_mvm_ebs_status_str(notif->ebs_status));
- if (notif->ebs_status)
+ if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+ notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
mvm->last_ebs_successful = false;
- mvm->scan_uid[uid_idx] = 0;
-
- if (!sched) {
- ieee80211_scan_completed(mvm->hw,
- notif->status ==
- IWL_SCAN_OFFLOAD_ABORTED);
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
- ieee80211_sched_scan_stopped(mvm->hw);
- } else {
- IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
- }
+ mvm->scan_uid_status[uid] = 0;
return 0;
}
-static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
- struct iwl_rx_packet *pkt, void *data)
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
- struct iwl_umac_scan_done *scan_done = data;
- struct iwl_umac_scan_complete *notif = (void *)pkt->data;
- u32 uid = __le32_to_cpu(notif->uid);
- int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
-
- if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
- return false;
-
- if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
- return false;
-
- /*
- * Clear scan uid of scans that was aborted from above and completed
- * in FW so the RX handler does nothing. Set last_ebs_successful here if
- * needed.
- */
- scan_done->mvm->scan_uid[uid_idx] = 0;
-
- if (notif->ebs_status)
- scan_done->mvm->last_ebs_successful = false;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+ u8 buf[256];
- return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
+ IWL_DEBUG_SCAN(mvm,
+ "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
+ notif->status, notif->scanned_channels,
+ iwl_mvm_dump_channel_list(notif->results,
+ notif->scanned_channels, buf,
+ sizeof(buf)));
+ return 0;
}
-static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
{
struct iwl_umac_scan_abort cmd = {
.hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
sizeof(struct iwl_mvm_umac_cmd_hdr)),
- .uid = cpu_to_le32(uid),
};
+ int uid, ret;
lockdep_assert_held(&mvm->mutex);
+ /* We should always get a valid index here, because we already
+ * checked that this type of scan was running in the generic
+ * code.
+ */
+ uid = iwl_mvm_scan_uid_by_status(mvm, type);
+ if (WARN_ON_ONCE(uid < 0))
+ return uid;
+
+ cmd.uid = cpu_to_le32(uid);
+
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
- return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+ if (!ret)
+ mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+
+ return ret;
}
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type, bool notify)
+static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
{
struct iwl_notification_wait wait_scan_done;
- static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
- struct iwl_umac_scan_done scan_done = {
- .mvm = mvm,
- .type = type,
- };
- int i, ret = -EIO;
+ static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+ SCAN_OFFLOAD_COMPLETE, };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
- iwl_scan_umac_done_check, &scan_done);
+ NULL, NULL);
IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
- if (mvm->scan_uid[i] & type) {
- int err;
-
- if (iwl_mvm_is_radio_killed(mvm) &&
- (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
- ieee80211_scan_completed(mvm->hw, true);
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- break;
- }
-
- err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
- if (!err)
- ret = 0;
- }
- }
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ ret = iwl_mvm_umac_scan_abort(mvm, type);
+ else
+ ret = iwl_mvm_lmac_scan_abort(mvm);
if (ret) {
- IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
+ IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
return ret;
}
ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
- if (ret)
- return ret;
-
- if (notify) {
- if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
- ieee80211_sched_scan_stopped(mvm->hw);
- if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
- ieee80211_scan_completed(mvm->hw, true);
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- }
- }
return ret;
}
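
The stop path follows the driver's notification-wait idiom: register interest in the completion notifications before sending the abort, drop the registration if the send fails, and otherwise block for up to one second (1 * HZ). A userspace pthread sketch of that race-free ordering (every name here is invented for the demo):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

static void *firmware(void *arg)
{
	pthread_mutex_lock(&lock);
	done = true;			/* the "scan complete" notification */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t fw;
	struct timespec to;

	/* register first (done == false is the registration), then
	 * trigger the event: starting the "firmware" thread plays
	 * the role of sending the abort command */
	pthread_create(&fw, NULL, firmware, NULL);

	clock_gettime(CLOCK_REALTIME, &to);
	to.tv_sec += 1;			/* bounded wait, like 1 * HZ */

	pthread_mutex_lock(&lock);
	while (!done && pthread_cond_timedwait(&cond, &lock, &to) == 0)
		;
	printf("scan stop %s\n", done ? "completed" : "timed out");
	pthread_mutex_unlock(&lock);
	pthread_join(fw, NULL);
	return 0;
}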
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return sizeof(struct iwl_scan_req_umac) +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_req_umac_tail);
- return sizeof(struct iwl_scan_req_unified_lmac) +
+ return sizeof(struct iwl_scan_req_lmac) +
sizeof(struct iwl_scan_channel_cfg_lmac) *
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_probe_req);
@@ -1640,47 +1507,76 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
*/
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
{
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
- u32 uid, i;
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ int uid, i;
- uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
- if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS) {
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
+ if (uid >= 0) {
ieee80211_scan_completed(mvm->hw, true);
- mvm->scan_uid[uid] = 0;
+ mvm->scan_uid_status[uid] = 0;
}
- uid = iwl_mvm_find_first_scan(mvm,
- IWL_UMAC_SCAN_UID_SCHED_SCAN);
- if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS && !mvm->restart_fw) {
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
+ if (uid >= 0 && !mvm->restart_fw) {
ieee80211_sched_scan_stopped(mvm->hw);
- mvm->scan_uid[uid] = 0;
+ mvm->scan_uid_status[uid] = 0;
}
/* We shouldn't have any UIDs still set. Loop over all the
* UIDs to make sure there's nothing left there and warn if
* any is found.
*/
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
- if (WARN_ONCE(mvm->scan_uid[i],
- "UMAC scan UID %d was not cleaned\n",
- mvm->scan_uid[i]))
- mvm->scan_uid[i] = 0;
+ for (i = 0; i < mvm->max_scans; i++) {
+ if (WARN_ONCE(mvm->scan_uid_status[i],
+ "UMAC scan UID %d status was not cleaned\n",
+ i))
+ mvm->scan_uid_status[i] = 0;
}
} else {
- switch (mvm->scan_status) {
- case IWL_MVM_SCAN_NONE:
- break;
- case IWL_MVM_SCAN_OS:
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
ieee80211_scan_completed(mvm->hw, true);
- break;
- case IWL_MVM_SCAN_SCHED:
- /*
- * Sched scan will be restarted by mac80211 in
- * restart_hw, so do not report if FW is about to be
- * restarted.
- */
- if (!mvm->restart_fw)
- ieee80211_sched_scan_stopped(mvm->hw);
- break;
- }
+
+ /* Sched scan will be restarted by mac80211 in
+ * restart_hw, so do not report if FW is about to be
+ * restarted.
+ */
+ if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ }
+}
+
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+{
+ int ret;
+
+ if (!(mvm->scan_status & type))
+ return 0;
+
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = iwl_mvm_scan_stop_wait(mvm, type);
+ if (!ret)
+ mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+out:
+ /* Clear the scan status so the next scan requests will
+ * succeed and mark the scan as stopping, so that the Rx
+ * handler doesn't do anything, as the scan was stopped from
+ * above.
+ */
+ mvm->scan_status &= ~type;
+
+ if (type == IWL_MVM_SCAN_REGULAR) {
+ /* Since the rx handler won't do anything now, we have
+ * to release the scan reference here.
+ */
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ if (notify)
+ ieee80211_scan_completed(mvm->hw, true);
+ } else if (notify) {
+ ieee80211_sched_scan_stopped(mvm->hw);
}
+
+ return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 1845b79487c8..d68dc697a4a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -5,8 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,8 +31,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1000,13 +1000,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+ iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
+ buf_size, ssn, wdg_timeout);
+
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
return -EIO;
- iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
- buf_size, ssn, wdg_timeout);
-
/*
* Even though in theory the peer could have different
* aggregation reorder buffer sizes for different sessions,
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index fd7b0d36f9a6..d24b6a83e68c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+ }
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
queues |= BIT(mvm->aux_queue);
-
- iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
+ }
synchronize_net();
@@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
te_data->running = true;
+ iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
} else {
IWL_DEBUG_TE(mvm,
@@ -794,13 +797,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
- struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif *mvmvif = NULL;
struct iwl_mvm_time_event_data *te_data;
bool is_p2p = false;
lockdep_assert_held(&mvm->mutex);
- mvmvif = NULL;
spin_lock_bh(&mvm->time_event_lock);
/*
@@ -818,17 +820,14 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
}
}
- /*
- * Iterate over the list of aux roc time events and find the time
- * event that is associated with a BSS interface.
- * This assumes that a BSS interface can have only a single time
- * event at any given time and this time event corresponds to a ROC
- * request
+ /* There can be at most one AUX ROC time event; we just use the
+ * list to simplify/unify the code. Remove it if it exists.
*/
- list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
+ te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
+ struct iwl_mvm_time_event_data,
+ list);
+ if (te_data)
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- goto remove_te;
- }
remove_te:
spin_unlock_bh(&mvm->time_event_lock);
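
The rewrite above swaps a one-iteration list_for_each_entry() loop for list_first_entry_or_null(). As a rough user-space sketch of what that macro does (simplified from include/linux/list.h; struct te here is a stand-in, not the driver's type):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
/* first entry of an intrusive list, or NULL when the list is empty */
#define list_first_entry_or_null(head, type, member) \
	((head)->next != (head) ? \
	 container_of((head)->next, type, member) : NULL)

struct te { int id; struct list_head list; };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head aux_roc_te_list = LIST_HEAD_INIT(aux_roc_te_list);
	struct te one = { .id = 1 }, *te_data;

	te_data = list_first_entry_or_null(&aux_roc_te_list, struct te, list);
	printf("empty list: %s\n", te_data ? "entry" : "NULL");

	list_add_tail(&one.list, &aux_roc_te_list);
	te_data = list_first_entry_or_null(&aux_roc_te_list, struct te, list);
	printf("one entry: id %d\n", te_data->id);
	return 0;
}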
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index ba615ad2176c..80d07db6e7e8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -70,7 +70,7 @@
static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
- u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
+ u32 duration = tt->params.ct_kill_duration;
if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
return;
@@ -223,7 +223,7 @@ static void check_exit_ctkill(struct work_struct *work)
tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
- duration = tt->params->ct_kill_duration;
+ duration = tt->params.ct_kill_duration;
mutex_lock(&mvm->mutex);
@@ -247,7 +247,7 @@ static void check_exit_ctkill(struct work_struct *work)
IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
- if (temp <= tt->params->ct_kill_exit) {
+ if (temp <= tt->params.ct_kill_exit) {
mutex_unlock(&mvm->mutex);
iwl_mvm_exit_ctkill(mvm);
return;
@@ -325,7 +325,7 @@ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
{
- const struct iwl_tt_params *params = mvm->thermal_throttle.params;
+ struct iwl_tt_params *params = &mvm->thermal_throttle.params;
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
s32 temperature = mvm->temperature;
bool throttle_enable = false;
@@ -340,7 +340,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
}
if (params->support_ct_kill &&
- temperature <= tt->params->ct_kill_exit) {
+ temperature <= params->ct_kill_exit) {
iwl_mvm_exit_ctkill(mvm);
return;
}
@@ -400,7 +400,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
}
}
-static const struct iwl_tt_params iwl7000_tt_params = {
+static const struct iwl_tt_params iwl_mvm_default_tt_params = {
.ct_kill_entry = 118,
.ct_kill_exit = 96,
.ct_kill_duration = 5,
@@ -422,38 +422,16 @@ static const struct iwl_tt_params iwl7000_tt_params = {
.support_tx_backoff = true,
};
-static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
- .ct_kill_entry = 118,
- .ct_kill_exit = 96,
- .ct_kill_duration = 5,
- .dynamic_smps_entry = 114,
- .dynamic_smps_exit = 110,
- .tx_protection_entry = 114,
- .tx_protection_exit = 108,
- .tx_backoff = {
- {.temperature = 112, .backoff = 300},
- {.temperature = 113, .backoff = 800},
- {.temperature = 114, .backoff = 1500},
- {.temperature = 115, .backoff = 3000},
- {.temperature = 116, .backoff = 5000},
- {.temperature = 117, .backoff = 10000},
- },
- .support_ct_kill = true,
- .support_dynamic_smps = true,
- .support_tx_protection = true,
- .support_tx_backoff = true,
-};
-
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
- if (mvm->cfg->high_temp)
- tt->params = &iwl7000_high_temp_tt_params;
+ if (mvm->cfg->thermal_params)
+ tt->params = *mvm->cfg->thermal_params;
else
- tt->params = &iwl7000_tt_params;
+ tt->params = iwl_mvm_default_tt_params;
tt->throttle = false;
tt->dynamic_smps = false;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index ef32e177f662..7ba7a118ff5c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,30 @@
#include "mvm.h"
#include "sta.h"
+static void
+iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
+ u16 tid, u16 ssn)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
+ return;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+ "BAR sent to %pM, tid %d, ssn %d",
+ addr, tid, ssn);
+}
+
/*
* Sets most of the Tx cmd's fields
*/
@@ -101,12 +125,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
+ u16 ssn = le16_to_cpu(bar->start_seq_num);
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
tx_cmd->tid_tspec = (control &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
+ iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
+ ssn);
} else {
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -144,8 +171,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
!is_multicast_ether_addr(ieee80211_get_DA(hdr)))
tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
- if ((mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
ieee80211_action_contains_tpc(skb))
tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
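
Both hunks above replace a raw test of the first capability word with the fw_has_capa()/fw_has_api() helpers. A stand-alone sketch shows why the helper matters once bit numbers pass 31; the bit value 44 is a made-up example, not the real IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT number.

#include <stdio.h>

#define CAPA_WORDS 4

struct ucode_capa { unsigned int capa[CAPA_WORDS]; };

/* test one bit in a multi-word capability array; "capa[0] & FLAG"
 * can only ever see bits 0..31 */
static int fw_has_capa(const struct ucode_capa *c, unsigned int bit)
{
	return (c->capa[bit / 32] >> (bit % 32)) & 1;
}

int main(void)
{
	struct ucode_capa capa = { { 0 } };
	unsigned int bit = 44;	/* made-up capability number > 31 */

	capa.capa[bit / 32] |= 1u << (bit % 32);
	printf("capa %u set: %d, capa 3 set: %d\n",
	       bit, fw_has_capa(&capa, bit), fw_has_capa(&capa, 3));
	return 0;
}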
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index bc55a8b82db6..03f8e06dded7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -584,7 +584,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
struct iwl_error_event_table table;
u32 base;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
iwl_mvm_dump_nic_error_log_old(mvm);
return;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index b18569734922..2ed1e4d2774d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -629,7 +629,18 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;
- iwl_enable_rfkill_int(trans);
+ /*
+ * On suspend, ict is disabled, and the interrupt mask
+ * gets cleared. Reconfigure them both in case of d0i3
+ * image. Otherwise, only enable rfkill interrupt (in
+ * order to keep track of the rfkill status)
+ */
+ if (trans->wowlan_d0i3) {
+ iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
+ } else {
+ iwl_enable_rfkill_int(trans);
+ }
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 376b84e54ad7..31f72a61cc3f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,6 +44,15 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
struct iwl_host_cmd;
/*This file includes the declaration that are internal to the
@@ -77,29 +86,29 @@ struct isr_statistics {
* struct iwl_rxq - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator to use for allocation
* @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
* @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
struct iwl_rxq {
__le32 *bd;
dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
u32 read;
u32 write;
u32 free_count;
+ u32 used_count;
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
@@ -107,6 +116,32 @@ struct iwl_rxq {
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ * the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ * of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+ struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+ atomic_t req_pending;
+ atomic_t req_ready;
+ struct list_head rbd_allocated;
+ struct list_head rbd_empty;
+ spinlock_t lock;
+ struct workqueue_struct *alloc_wq;
+ struct work_struct rx_alloc;
};
struct iwl_dma_ptr {
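
The req_pending/req_ready pair forms a small producer/consumer handshake: the RX queue bumps req_pending when it posts a request, the worker moves one unit from req_pending to req_ready per completed batch of 8 pages, and the claimer decrements req_ready, backing off if it would go negative. A minimal sketch follows, using C11 atomics as a stand-in for the kernel's atomic_t (an assumption made for portability; the driver uses atomic_inc()/atomic_dec_return()).

#include <stdatomic.h>
#include <stdio.h>

static atomic_int req_pending, req_ready;

static void queue_post_request(void)	/* as in iwl_pcie_rx_reuse_rbd() */
{
	atomic_fetch_add(&req_pending, 1);	/* then schedule the worker */
}

static void allocator_complete_one(void) /* as in iwl_pcie_rx_allocator() */
{
	atomic_fetch_sub(&req_pending, 1);
	atomic_fetch_add(&req_ready, 1);
}

static int queue_try_claim(void)    /* as in iwl_pcie_rx_allocator_get() */
{
	/* fetch_sub returns the old value, so "- 1" mirrors the
	 * kernel's atomic_dec_return() < 0 test */
	if (atomic_fetch_sub(&req_ready, 1) - 1 < 0) {
		atomic_fetch_add(&req_ready, 1);	/* nothing ready: undo */
		return -1;
	}
	return 0;	/* caller may now take 8 pre-allocated RBDs */
}

int main(void)
{
	queue_post_request();
	printf("claim before completion: %d\n", queue_try_claim());	/* -1 */
	allocator_complete_one();
	printf("claim after completion: %d\n", queue_try_claim());	/* 0 */
	return 0;
}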
@@ -250,7 +285,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
@@ -273,7 +308,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
- struct work_struct rx_replenish;
+ struct iwl_rb_allocator rba;
struct iwl_trans *trans;
struct iwl_drv *drv;
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 7ff69c642103..a3fbaa0ef5e0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -74,16 +74,29 @@
* resets the Rx queue buffers with new memory.
*
* The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ * When the interrupt handler is called, the request is processed.
+ * The page is either stolen - transferred to the upper layer
+ * or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ * count, detaches the RBD and transfers it to the queue used list.
+ * When there are two used RBDs - they are transferred to the allocator empty
+ * list. Work is then scheduled for the allocator to start allocating
+ * eight buffers.
+ * When 6 more RBDs are used - they are transferred to the allocator
+ * empty list and the driver tries to claim the pre-allocated buffers and
+ * add them to iwl->rxq->rx_free. If that fails - it keeps trying to
+ * claim them until they are ready.
+ * When there are 8+ buffers in the free list - either from allocation or from
+ * 8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for
+ * allocation the allocator has an initial pool of size num_queues*(8-2) -
+ * the maximum number of missing RBDs per allocation request (a request
+ * is posted with 2 empty RBDs; there is no guarantee when the other 6
+ * RBDs are supplied). The queues supply the recycling of the rest of
+ * the RBDs.
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
* the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
* If there were enough free buffers and RX_STALLED is set it is cleared.
*
@@ -92,18 +105,32 @@
*
* iwl_rxq_alloc() Allocates rx_free
* iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock.
+ * Used only during initialization.
* iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_pcie_rx_replenish
+ * the WRITE index.
+ * iwl_pcie_rx_allocator() Background work for allocating pages.
*
* -- enable interrupts --
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
+ * Posts and claims requests to the allocator.
* Calls iwl_pcie_rxq_restock to refill any empty
* slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
* ...
*
*/
@@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->free_count--;
}
spin_unlock(&rxq->lock);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- schedule_work(&trans_pcie->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
@@ -255,6 +278,44 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
}
/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ *
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct page *page;
+ gfp_t gfp_mask = GFP_KERNEL;
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (trans_pcie->rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+ trans_pcie->rx_page_order);
+ /* Issue an error if the hardware has consumed more than half
+ * of its free buffer list and we don't have enough
+ * pre-allocated buffers.
+ */
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+ net_ratelimit())
+ IWL_CRIT(trans,
+ "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+ rxq->free_count);
+ return NULL;
+ }
+ return page;
+}
+
+/*
* iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
*
* A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -263,13 +324,12 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
- gfp_t gfp_mask = priority;
while (1) {
spin_lock(&rxq->lock);
@@ -279,32 +339,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
}
spin_unlock(&rxq->lock);
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (trans_pcie->rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
/* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(trans, "alloc_pages failed, "
- "order: %d\n",
- trans_pcie->rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(trans, "Failed to alloc_pages with %s."
- "Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
+ page = iwl_pcie_rx_alloc_page(trans);
+ if (!page)
return;
- }
spin_lock(&rxq->lock);
@@ -355,7 +393,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
lockdep_assert_held(&rxq->lock);
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ for (i = 0; i < RX_QUEUE_SIZE; i++) {
if (!rxq->pool[i].page)
continue;
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +410,144 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* When moving to rx_free an page is allocated for the slot.
*
* Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
- iwl_pcie_rxq_alloc_rbs(trans, gfp);
+ iwl_pcie_rxq_alloc_rbs(trans);
iwl_pcie_rxq_restock(trans);
}
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates 8 pages for each received request.
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+ while (atomic_read(&rba->req_pending)) {
+ int i;
+ struct list_head local_empty;
+ struct list_head local_allocated;
+
+ INIT_LIST_HEAD(&local_allocated);
+ spin_lock(&rba->lock);
+ /* swap out the entire rba->rbd_empty to a local list */
+ list_replace_init(&rba->rbd_empty, &local_empty);
+ spin_unlock(&rba->lock);
+
+ for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+
+ /* List should never be empty - each reused RBD is
+ * returned to the list, and initial pool covers any
+ * possible gap between the time the page is allocated
+ * and the time the RBD is added.
+ */
+ BUG_ON(list_empty(&local_empty));
+ /* Get the first rxb from the rbd list */
+ rxb = list_first_entry(&local_empty,
+ struct iwl_rx_mem_buffer, list);
+ BUG_ON(rxb->page);
+
+ /* Alloc a new receive buffer */
+ page = iwl_pcie_rx_alloc_page(trans);
+ if (!page)
+ continue;
+ rxb->page = page;
+
+ /* Get physical address of the RB */
+ rxb->page_dma = dma_map_page(trans->dev, page, 0,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ __free_pages(page, trans_pcie->rx_page_order);
+ continue;
+ }
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ /* move the allocated entry to the out list */
+ list_move(&rxb->list, &local_allocated);
+ i++;
+ }
+
+ spin_lock(&rba->lock);
+ /* add the allocated rbds to the allocator allocated list */
+ list_splice_tail(&local_allocated, &rba->rbd_allocated);
+ /* add the unused rbds back to the allocator empty list */
+ list_splice_tail(&local_empty, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+
+ atomic_dec(&rba->req_pending);
+ atomic_inc(&rba->req_ready);
+ }
+}
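
Two details of the allocator above deserve a note: the shared rbd_empty list is swapped out wholesale under the lock (list_replace_init) so pages can be allocated without holding it, and the two BUG_ON() checks enforce the hardware's DMA constraints. The latter read a bit cryptically: addr & ~DMA_BIT_MASK(36) catches addresses that need more than 36 bits, while addr & DMA_BIT_MASK(8) catches addresses that are not 256-byte aligned. A stand-alone rendering, with DMA_BIT_MASK written as in linux/dma-mapping.h:

#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t ok = 0x0000000fffffff00ULL;	/* 36-bit, 256-aligned */
	uint64_t too_high = 1ULL << 36;		/* needs a 37th bit */
	uint64_t misaligned = 0x100080ULL;	/* not 256-byte aligned */

	printf("ok:         high=%d unaligned=%d\n",
	       !!(ok & ~DMA_BIT_MASK(36)), !!(ok & DMA_BIT_MASK(8)));
	printf("too_high:   high=%d\n", !!(too_high & ~DMA_BIT_MASK(36)));
	printf("misaligned: unaligned=%d\n", !!(misaligned & DMA_BIT_MASK(8)));
	return 0;
}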
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer
+ *out[RX_CLAIM_REQ_ALLOC])
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i;
+
+ if (atomic_dec_return(&rba->req_ready) < 0) {
+ atomic_inc(&rba->req_ready);
+ IWL_DEBUG_RX(trans,
+ "Allocation request not ready, pending requests = %d\n",
+ atomic_read(&rba->req_pending));
+ return -ENOMEM;
+ }
+
+ spin_lock(&rba->lock);
+ for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+ /* Get next free Rx buffer, remove it from free list */
+ out[i] = list_first_entry(&rba->rbd_allocated,
+ struct iwl_rx_mem_buffer, list);
+ list_del(&out[i]->list);
+ }
+ spin_unlock(&rba->lock);
+
+ return 0;
+}
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
+ struct iwl_rb_allocator *rba_p =
+ container_of(data, struct iwl_rb_allocator, rx_alloc);
struct iwl_trans_pcie *trans_pcie =
- container_of(data, struct iwl_trans_pcie, rx_replenish);
+ container_of(rba_p, struct iwl_trans_pcie, rba);
- iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+ iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct device *dev = trans->dev;
memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
spin_lock_init(&rxq->lock);
+ spin_lock_init(&rba->lock);
if (WARN_ON(rxq->bd || rxq->rb_stts))
return -EINVAL;
@@ -487,15 +637,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
rxq->free_count = 0;
+ rxq->used_count = 0;
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
list_add(&rxq->pool[i].list, &rxq->rx_used);
}
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+ int i;
+
+ lockdep_assert_held(&rba->lock);
+
+ INIT_LIST_HEAD(&rba->rbd_allocated);
+ INIT_LIST_HEAD(&rba->rbd_empty);
+
+ for (i = 0; i < RX_POOL_SIZE; i++)
+ list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i;
+
+ lockdep_assert_held(&rba->lock);
+
+ for (i = 0; i < RX_POOL_SIZE; i++) {
+ if (!rba->pool[i].page)
+ continue;
+ dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+ rba->pool[i].page = NULL;
+ }
+}
+
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, err;
if (!rxq->bd) {
@@ -503,11 +687,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (err)
return err;
}
+ if (!rba->alloc_wq)
+ rba->alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+ spin_lock(&rba->lock);
+ atomic_set(&rba->req_pending, 0);
+ atomic_set(&rba->req_ready, 0);
+ /* free all first - we might be reconfigured for a different size */
+ iwl_pcie_rx_free_rba(trans);
+ iwl_pcie_rx_init_rba(rba);
+ spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
- INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
/* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +716,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+ iwl_pcie_rx_replenish(trans);
iwl_pcie_rx_hw_init(trans, rxq);
@@ -537,6 +731,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -545,7 +740,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
return;
}
- cancel_work_sync(&trans_pcie->rx_replenish);
+ cancel_work_sync(&rba->rx_alloc);
+ if (rba->alloc_wq) {
+ destroy_workqueue(rba->alloc_wq);
+ rba->alloc_wq = NULL;
+ }
+
+ spin_lock(&rba->lock);
+ iwl_pcie_rx_free_rba(trans);
+ spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
@@ -566,6 +769,43 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rxq->rb_stts = NULL;
}
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when an RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+ /* Count the used RBDs */
+ rxq->used_count++;
+
+ /* Move the RBD to the used list; it will be moved to the allocator
+ * in batches before claiming or posting a request */
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+ * issue a request for the allocator. The modulo by RX_CLAIM_REQ_ALLOC
+ * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
+ * buffers earlier but still need to post another request.
+ */
+ if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+ /* Move the 2 RBDs to the allocator ownership. The allocator has
+ * another 6 from the pool for the request completion. */
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+
+ atomic_inc(&rba->req_pending);
+ queue_work(rba->alloc_wq, &rba->rx_alloc);
+ }
+}
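
The modulo in the condition above is doing real work: normally used_count is pulled back by 8 on every claim, so the == 2 case repeats, but if a claim is deferred, used_count keeps climbing and a request must still be posted at 10, 18, and so on. A stand-alone enumeration of the trigger points:

#include <stdio.h>

#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8

int main(void)
{
	int used_count;

	/* which used_count values post an allocation request when no
	 * claim has pulled the counter back down */
	for (used_count = 1; used_count <= 20; used_count++)
		if ((used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC)
			printf("post allocation request at used_count=%d\n",
			       used_count);	/* 2, 10, 18 */
	return 0;
}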
+
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb)
{
@@ -688,13 +928,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
- list_add_tail(&rxb->list, &rxq->rx_used);
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
- list_add_tail(&rxb->list, &rxq->rx_used);
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
}
/*
@@ -704,10 +944,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- u32 r, i;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
+ u32 r, i, j;
restart:
spin_lock(&rxq->lock);
@@ -720,14 +957,6 @@ restart:
if (i == r)
IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
- /* calculate total frames need to be restock after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
while (i != r) {
struct iwl_rx_mem_buffer *rxb;
@@ -739,29 +968,48 @@ restart:
iwl_pcie_rx_handle_rb(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode wont assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
- count = 0;
- goto restart;
+
+ /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+ * try to claim the pre-allocated buffers from the allocator */
+ if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+ /* Add the remaining 6 empty RBDs for allocator use */
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+
+ /* If not ready - continue; we will try to reclaim later.
+ * No need to reschedule work - allocator exits only on
+ * success */
+ if (!iwl_pcie_rx_allocator_get(trans, out)) {
+ /* If success - then RX_CLAIM_REQ_ALLOC
+ * buffers were retrieved and should be added
+ * to free list */
+ rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+ for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+ list_add_tail(&out[j]->list,
+ &rxq->rx_free);
+ rxq->free_count++;
+ }
}
}
+ /* handle restock for two cases:
+ * - we just pulled buffers from the allocator
+ * - we have 8+ unstolen pages accumulated */
+ if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+ rxq->read = i;
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rxq_restock(trans);
+ goto restart;
+ }
}
/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);
- if (fill_rx)
- iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
- else
- iwl_pcie_rxq_restock(trans);
-
if (trans_pcie->napi.poll)
napi_gro_flush(&trans_pcie->napi, false);
}
@@ -775,6 +1023,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
+ !trans->cfg->apmg_not_supported &&
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dc179094e6a0..43ae658af6ec 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -101,14 +101,26 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
trans_pcie->fw_mon_size = 0;
}
-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
+static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page *page = NULL;
dma_addr_t phys;
- u32 size;
+ u32 size = 0;
u8 power;
+ if (!max_power) {
+ /* default max_power is maximum */
+ max_power = 26;
+ } else {
+ max_power += 11;
+ }
+
+ if (WARN(max_power > 26,
+ "External buffer size for monitor is too big %d, check the FW TLV\n",
+ max_power))
+ return;
+
if (trans_pcie->fw_mon_page) {
dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
trans_pcie->fw_mon_size,
@@ -117,7 +129,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
}
phys = 0;
- for (power = 26; power >= 11; power--) {
+ for (power = max_power; power >= 11; power--) {
int order;
size = BIT(power);
@@ -143,6 +155,12 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
if (WARN_ON_ONCE(!page))
return;
+ if (power != max_power)
+ IWL_ERR(trans,
+ "Sorry - debug buffer is only %luK while you requested %luK\n",
+ (unsigned long)BIT(power - 10),
+ (unsigned long)BIT(max_power - 10));
+
trans_pcie->fw_mon_page = page;
trans_pcie->fw_mon_phys = phys;
trans_pcie->fw_mon_size = size;
@@ -164,6 +182,9 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
+ if (!trans->cfg->apmg_not_supported)
+ return;
+
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
@@ -297,7 +318,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
* bits do not disable clocks. This preserves any hardware
* bits already set by default in "CLK_CTRL_REG" after reset.
*/
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!trans->cfg->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_EN_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(20);
@@ -497,8 +518,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
- iwl_pcie_set_pwr(trans, false);
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -834,7 +854,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
get_fw_dbg_mode_string(dest->monitor_mode));
if (dest->monitor_mode == EXTERNAL_MODE)
- iwl_pcie_alloc_fw_monitor(trans);
+ iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
else
IWL_WARN(trans, "PCI should have external buffer debug\n");
@@ -908,7 +928,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* supported for 7000 only for the moment */
if (iwlwifi_mod_params.fw_monitor &&
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- iwl_pcie_alloc_fw_monitor(trans);
+ iwl_pcie_alloc_fw_monitor(trans, 0);
if (trans_pcie->fw_mon_size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
@@ -955,12 +975,8 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
return ret;
/* load to FW the binary sections of CPU2 */
- ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
- &first_ucode_section);
- if (ret)
- return ret;
-
- return 0;
+ return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+ &first_ucode_section);
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
@@ -1049,7 +1065,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_pcie_rx_stop(trans);
/* Power-down device's busmaster DMA clocks */
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!trans->cfg->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
@@ -1346,14 +1362,13 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iounmap(trans_pcie->hw_base);
pci_release_regions(trans_pcie->pci_dev);
pci_disable_device(trans_pcie->pci_dev);
- kmem_cache_destroy(trans->dev_cmd_pool);
if (trans_pcie->napi.poll)
netif_napi_del(&trans_pcie->napi);
iwl_pcie_free_fw_monitor(trans);
- kfree(trans);
+ iwl_trans_free(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
@@ -2200,6 +2215,29 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
return sizeof(**data) + fh_regs_len;
}
+static u32
+iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_fw_mon *fw_mon_data,
+ u32 monitor_len)
+{
+ u32 buf_size_in_dwords = (monitor_len >> 2);
+ u32 *buffer = (u32 *)fw_mon_data->data;
+ unsigned long flags;
+ u32 i;
+
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ return 0;
+
+ __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
+ for (i = 0; i < buf_size_in_dwords; i++)
+ buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR);
+ __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
+
+ iwl_trans_release_nic_access(trans, &flags);
+
+ return monitor_len;
+}
+
static
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
@@ -2252,7 +2290,8 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
trans->dbg_dest_tlv->end_shift;
/* Make "end" point to the actual end */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
+ trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
end += (1 << trans->dbg_dest_tlv->end_shift);
monitor_len = end - base;
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2328,9 +2367,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
len += sizeof(*data) + sizeof(*fw_mon_data);
if (trans_pcie->fw_mon_page) {
- data->len = cpu_to_le32(trans_pcie->fw_mon_size +
- sizeof(*fw_mon_data));
-
/*
* The firmware is now asserted, it won't write anything
* to the buffer. CPU can take ownership to fetch the
@@ -2345,10 +2381,8 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
page_address(trans_pcie->fw_mon_page),
trans_pcie->fw_mon_size);
- len += trans_pcie->fw_mon_size;
- } else {
- /* If we are here then the buffer is internal */
-
+ monitor_len = trans_pcie->fw_mon_size;
+ } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
/*
* Update pointers to reflect actual values after
* shifting
@@ -2357,10 +2391,18 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
trans->dbg_dest_tlv->base_shift;
iwl_trans_read_mem(trans, base, fw_mon_data->data,
monitor_len / sizeof(u32));
- data->len = cpu_to_le32(sizeof(*fw_mon_data) +
- monitor_len);
- len += monitor_len;
+ } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+ monitor_len =
+ iwl_trans_pci_dump_marbh_monitor(trans,
+ fw_mon_data,
+ monitor_len);
+ } else {
+ /* Didn't match anything - output no monitor data */
+ monitor_len = 0;
}
+
+ len += monitor_len;
+ data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
}
dump_data->len = len;
@@ -2419,18 +2461,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
u16 pci_cmd;
int err;
- trans = kzalloc(sizeof(struct iwl_trans) +
- sizeof(struct iwl_trans_pcie), GFP_KERNEL);
- if (!trans) {
- err = -ENOMEM;
- goto out;
- }
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+ &pdev->dev, cfg, &trans_ops_pcie, 0);
+ if (!trans)
+ return ERR_PTR(-ENOMEM);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- trans->ops = &trans_ops_pcie;
- trans->cfg = cfg;
- trans_lockdep_init(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
@@ -2554,25 +2591,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
- "iwl_cmd_pool:%s", dev_name(trans->dev));
-
- trans->dev_cmd_headroom = 0;
- trans->dev_cmd_pool =
- kmem_cache_create(trans->dev_cmd_pool_name,
- sizeof(struct iwl_device_cmd)
- + trans->dev_cmd_headroom,
- sizeof(void *),
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!trans->dev_cmd_pool) {
- err = -ENOMEM;
- goto out_pci_disable_msi;
- }
-
if (iwl_pcie_alloc_ict(trans))
- goto out_free_cmd_pool;
+ goto out_pci_disable_msi;
err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
@@ -2589,8 +2609,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
out_free_ict:
iwl_pcie_free_ict(trans);
-out_free_cmd_pool:
- kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
pci_disable_msi(pdev);
out_pci_release_regions:
@@ -2598,7 +2616,6 @@ out_pci_release_regions:
out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
- kfree(trans);
-out:
+ iwl_trans_free(trans);
return ERR_PTR(err);
}
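
The max_power handling in iwl_pcie_alloc_fw_monitor() above encodes the monitor buffer size as a power of two: a TLV value of 0 selects the default 2^26 bytes (64 MB), anything else is offset by 11 and capped at 26, and the allocator then steps down power by power toward 2^11 on failure. A stand-alone rendering of that arithmetic (the TLV values here are made up):

#include <stdio.h>

int main(void)
{
	unsigned int tlv_values[] = { 0, 4, 16 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		/* 0 means "default maximum"; otherwise TLV value + 11 */
		unsigned int max_power = tlv_values[i] ?
					 tlv_values[i] + 11 : 26;

		if (max_power > 26) {
			printf("TLV %u: rejected (buffer too big)\n",
			       tlv_values[i]);
			continue;
		}
		printf("TLV %u: try %u KB first, fall back toward 2 KB\n",
		       tlv_values[i], 1u << (max_power - 10));
	}
	return 0;
}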
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 5ef8044c2ea3..2b86c2135de3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1049,8 +1049,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 1a4d558022d8..8317afd065b4 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -835,14 +835,13 @@ static int lbs_cfg_scan(struct wiphy *wiphy,
* Events
*/
-void lbs_send_disconnect_notification(struct lbs_private *priv)
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+ bool locally_generated)
{
lbs_deb_enter(LBS_DEB_CFG80211);
- cfg80211_disconnected(priv->dev,
- 0,
- NULL, 0,
- GFP_KERNEL);
+ cfg80211_disconnected(priv->dev, 0, NULL, 0, locally_generated,
+ GFP_KERNEL);
lbs_deb_leave(LBS_DEB_CFG80211);
}
@@ -1458,7 +1457,7 @@ int lbs_disconnect(struct lbs_private *priv, u16 reason)
cfg80211_disconnected(priv->dev,
reason,
- NULL, 0,
+ NULL, 0, true,
GFP_KERNEL);
priv->connect_status = LBS_DISCONNECTED;
@@ -2031,7 +2030,7 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
/* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, true);
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index 10995f59fe34..acccc2922401 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -10,7 +10,8 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev);
int lbs_cfg_register(struct lbs_private *priv);
void lbs_cfg_free(struct lbs_private *priv);
-void lbs_send_disconnect_notification(struct lbs_private *priv);
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+ bool locally_generated);
void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
void lbs_scan_done(struct lbs_private *priv);
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 4279e8ab95f2..0c5444b02c64 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -68,7 +68,8 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
/* From cmdresp.c */
-void lbs_mac_event_disconnected(struct lbs_private *priv);
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+ bool locally_generated);
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 65f18f1e869c..e5442e8956f7 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -19,10 +19,13 @@
* reset link state etc.
*
* @priv: A pointer to struct lbs_private structure
+ * @locally_generated: indicates disconnect was requested locally
+ * (usually by userspace)
*
* returns: n/a
*/
-void lbs_mac_event_disconnected(struct lbs_private *priv)
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+ bool locally_generated)
{
if (priv->connect_status != LBS_CONNECTED)
return;
@@ -36,7 +39,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
msleep_interruptible(1000);
if (priv->wdev->iftype == NL80211_IFTYPE_STATION)
- lbs_send_disconnect_notification(priv);
+ lbs_send_disconnect_notification(priv, locally_generated);
/* report disconnect to upper layer */
netif_stop_queue(priv->dev);
@@ -229,17 +232,17 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
case MACREG_INT_CODE_DEAUTHENTICATED:
lbs_deb_cmd("EVENT: deauthenticated\n");
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, false);
break;
case MACREG_INT_CODE_DISASSOCIATED:
lbs_deb_cmd("EVENT: disassociated\n");
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, false);
break;
case MACREG_INT_CODE_LINK_LOST_NO_SCAN:
lbs_deb_cmd("EVENT: link lost\n");
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, true);
break;
case MACREG_INT_CODE_PS_SLEEP:
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index ed02e4bf2c26..1bdf18674fb8 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -439,7 +439,7 @@ static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
return mc_count;
}
-#define SUPPORTED_FIF_FLAGS (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
+#define SUPPORTED_FIF_FLAGS FIF_ALLMULTI
static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -458,10 +458,7 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
return;
}
- if (*new_flags & (FIF_PROMISC_IN_BSS))
- priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
- else
- priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
+ priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
if (*new_flags & (FIF_ALLMULTI) ||
multicast > MRVDRV_MAX_MULTICAST_LIST_SIZE) {
priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d5c0a1af08b9..8d2f6bbf9598 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1554,8 +1554,6 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
wiphy_debug(hw->wiphy, "%s\n", __func__);
data->rx_filter = 0;
- if (*total_flags & FIF_PROMISC_IN_BSS)
- data->rx_filter |= FIF_PROMISC_IN_BSS;
if (*total_flags & FIF_ALLMULTI)
data->rx_filter |= FIF_ALLMULTI;
@@ -2399,7 +2397,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
- IEEE80211_HW_CHANCTX_STA_CSA;
+ IEEE80211_HW_CHANCTX_STA_CSA |
+ IEEE80211_HW_SUPPORT_FAST_XMIT;
if (rctbl)
hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
@@ -2438,6 +2437,31 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
sband->bitrates = data->rates + 4;
sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
+
+ sband->vht_cap.vht_supported = true;
+ sband->vht_cap.cap =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+ IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_RXSTBC_2 |
+ IEEE80211_VHT_CAP_RXSTBC_3 |
+ IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ sband->vht_cap.vht_mcs.rx_mcs_map =
+ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 14);
+ sband->vht_cap.vht_mcs.tx_mcs_map =
+ sband->vht_cap.vht_mcs.rx_mcs_map;
break;
default:
continue;
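
The rx_mcs_map built above is 16 bits holding one 2-bit field per spatial stream; the only functional change here is raising every stream from a mix of MCS 0-8/0-9 support to MCS 0-9. A stand-alone sketch of the packing, with the field values as in ieee80211.h (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = stream not supported):

#include <stdio.h>

#define VHT_MCS_SUPPORT_0_9 2

int main(void)
{
	unsigned int map = 0, ss;

	/* pack "MCS 0-9" into all 8 spatial-stream fields */
	for (ss = 0; ss < 8; ss++)
		map |= VHT_MCS_SUPPORT_0_9 << (ss * 2);

	printf("rx_mcs_map = 0x%04x\n", map);	/* 0xaaaa */

	/* decode stream 3 (0-based) back out of the map */
	printf("stream 3 field = %u\n", (map >> (3 * 2)) & 0x3);
	return 0;
}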
@@ -2458,31 +2482,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
hw->wiphy->bands[band] = sband;
-
- sband->vht_cap.vht_supported = true;
- sband->vht_cap.cap =
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
- IEEE80211_VHT_CAP_RXLDPC |
- IEEE80211_VHT_CAP_SHORT_GI_80 |
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
- IEEE80211_VHT_CAP_RXSTBC_4 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
- sband->vht_cap.vht_mcs.rx_mcs_map =
- cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
- sband->vht_cap.vht_mcs.tx_mcs_map =
- sband->vht_cap.vht_mcs.rx_mcs_map;
}
/* By default all radios belong to the first group */
diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig
new file mode 100644
index 000000000000..cba300c6b5da
--- /dev/null
+++ b/drivers/net/wireless/mediatek/Kconfig
@@ -0,0 +1,10 @@
+menuconfig WL_MEDIATEK
+ bool "Mediatek Wireless LAN support"
+ ---help---
+ Enable community drivers for MediaTek WiFi devices.
+ Those drivers make use of the Linux mac80211 stack.
+
+if WL_MEDIATEK
+source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+endif # WL_MEDIATEK
diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile
new file mode 100644
index 000000000000..9d5f182fd7fd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MT7601U) += mt7601u/
diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig
new file mode 100644
index 000000000000..f46bed92796b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/Kconfig
@@ -0,0 +1,6 @@
+config MT7601U
+ tristate "MediaTek MT7601U (USB) support"
+ depends on MAC80211
+ depends on USB
+ ---help---
+ This adds support for MT7601U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile
new file mode 100644
index 000000000000..ea9ed8a5db4d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -D__CHECK_ENDIAN__
+
+obj-$(CONFIG_MT7601U) += mt7601u.o
+
+mt7601u-objs = \
+ usb.o init.o main.o mcu.o trace.o dma.o core.o eeprom.o phy.o \
+ mac.o util.o debugfs.o tx.o
+
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt7601u/core.c b/drivers/net/wireless/mediatek/mt7601u/core.c
new file mode 100644
index 000000000000..0aabd790f985
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/core.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev)
+{
+ int i = 100;
+ u32 val;
+
+ do {
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return -EIO;
+
+ val = mt7601u_rr(dev, MT_MAC_CSR0);
+ if (val && ~val)
+ return 0;
+
+ udelay(10);
+ } while (i--);
+
+ return -EIO;
+}
+
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return false;
+
+ cur = mt7601u_rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ udelay(10);
+ } while (timeout-- > 0);
+
+ dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+ return false;
+}
+
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return false;
+
+ cur = mt7601u_rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ msleep(10);
+ } while (timeout-- > 0);
+
+ dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+ return false;
+}
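
mt76_poll() and mt76_poll_msec() above implement the usual read-mask-compare-retry loop, burning the timeout in 10-unit steps (microseconds and milliseconds respectively). Note also the "if (val && ~val)" test in mt7601u_wait_asic_ready(): it treats both 0 and 0xffffffff reads as "ASIC not ready or removed". A stand-alone sketch of the polling pattern; read_reg() is a stub standing in for mt7601u_rr():

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

static int reads;

static uint32_t read_reg(uint32_t offset)
{
	(void)offset;
	return reads++ >= 2 ? 0x80000000u : 0;	/* "ready" on 3rd read */
}

static bool poll_reg(uint32_t offset, uint32_t mask, uint32_t val,
		     int timeout_us)
{
	int steps = timeout_us / 10;	/* one read per 10 us budget */

	do {
		if ((read_reg(offset) & mask) == val)
			return true;
		/* udelay(10) would go here in the driver */
	} while (steps-- > 0);

	return false;
}

int main(void)
{
	printf("ready: %d after %d reads\n",
	       poll_reg(0x0734, 0x80000000u, 0x80000000u, 1000), reads);
	return 0;
}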
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
new file mode 100644
index 000000000000..fc008475a03b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt7601u_dev *dev = data;
+
+ mt76_wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt7601u_dev *dev = data;
+
+ *val = mt76_rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt7601u_dev *dev = file->private;
+ int i, j;
+
+#define stat_printf(grp, off, name) \
+ seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+ stat_printf(rx_stat, 0, rx_crc_err);
+ stat_printf(rx_stat, 1, rx_phy_err);
+ stat_printf(rx_stat, 2, rx_false_cca);
+ stat_printf(rx_stat, 3, rx_plcp_err);
+ stat_printf(rx_stat, 4, rx_fifo_overflow);
+ stat_printf(rx_stat, 5, rx_duplicate);
+
+ stat_printf(tx_stat, 0, tx_fail_cnt);
+ stat_printf(tx_stat, 1, tx_bcn_cnt);
+ stat_printf(tx_stat, 2, tx_success);
+ stat_printf(tx_stat, 3, tx_retransmit);
+ stat_printf(tx_stat, 4, tx_zero_len);
+ stat_printf(tx_stat, 5, tx_underflow);
+
+ stat_printf(aggr_stat, 0, non_aggr_tx);
+ stat_printf(aggr_stat, 1, aggr_tx);
+
+ stat_printf(zero_len_del, 0, tx_zero_len_del);
+ stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+ seq_puts(file, "Aggregations stats:\n");
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%08llx ",
+ dev->stats.aggr_n[i * 8 + j]);
+ seq_putc(file, '\n');
+ }
+
+ seq_printf(file, "recent average AMPDU len: %d\n",
+ atomic_read(&dev->avg_ampdu_len));
+
+ return 0;
+}
+
+static int
+mt7601u_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7601u_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt7601u_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
+mt7601u_eeprom_param_read(struct seq_file *file, void *data)
+{
+ struct mt7601u_dev *dev = file->private;
+ struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
+ struct tssi_data *td = &dev->ee->tssi_data;
+ int i;
+
+ seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+ seq_printf(file, "RSSI offset: %hhx %hhx\n",
+ dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]);
+ seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp);
+ seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain);
+ seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+ dev->ee->reg.start + dev->ee->reg.num - 1);
+
+ seq_puts(file, "Per rate power:\n");
+ for (i = 0; i < 2; i++)
+ seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+ rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40);
+ for (i = 0; i < 4; i++)
+ seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+ rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40);
+ for (i = 0; i < 4; i++)
+ seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+ rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40);
+
+ seq_puts(file, "Per channel power:\n");
+ for (i = 0; i < 7; i++)
+ seq_printf(file, "\t tx_power ch%u:%02hhx ch%u:%02hhx\n",
+ i * 2 + 1, dev->ee->chan_pwr[i * 2],
+ i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]);
+
+ if (!dev->ee->tssi_enabled)
+ return 0;
+
+ seq_puts(file, "TSSI:\n");
+ seq_printf(file, "\t slope:%02hhx\n", td->slope);
+ seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n",
+ td->offset[0], td->offset[1], td->offset[2]);
+ seq_printf(file, "\t delta_off:%08x\n", td->tx0_delta_offset);
+
+ return 0;
+}
+
+static int
+mt7601u_eeprom_param_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7601u_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+ .open = mt7601u_eeprom_param_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir);
+ if (!dir)
+ return;
+
+ debugfs_create_u8("temperature", S_IRUSR, dir, &dev->raw_temp);
+ debugfs_create_u32("temp_mode", S_IRUSR, dir, &dev->temp_mode);
+
+ debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+ debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+ &fops_regval);
+ debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
+ &fops_eeprom_param);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
new file mode 100644
index 000000000000..9c9e1288644b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+ struct mt7601u_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+ const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+ unsigned int hdrlen;
+
+ if (unlikely(len < 10))
+ return 0;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (unlikely(hdrlen > len))
+ return 0;
+ return hdrlen;
+}
+
+static struct sk_buff *
+mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+ u8 *data, u32 seg_len)
+{
+ struct sk_buff *skb;
+ u32 true_len;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
+ seg_len -= 2;
+
+ skb = alloc_skb(seg_len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+ int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
+
+ memcpy(skb_put(skb, hdr_len), data, hdr_len);
+ data += hdr_len + 2;
+ seg_len -= hdr_len;
+ }
+
+ memcpy(skb_put(skb, seg_len), data, seg_len);
+
+ true_len = mt76_mac_process_rx(dev, skb, skb->data, rxwi);
+ skb_trim(skb, true_len);
+
+ return skb;
+}
+
+static struct sk_buff *
+mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev,
+ struct mt7601u_rxwi *rxwi, void *data,
+ u32 seg_len, u32 truesize, struct page *p)
+{
+ unsigned int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
+ unsigned int true_len, copy, frag;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+ memcpy(skb_put(skb, hdr_len), data, hdr_len);
+ data += hdr_len + 2;
+ true_len -= hdr_len;
+ hdr_len = 0;
+ }
+
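+ /* Copy the entire frame into the skb head if it fits; otherwise
+ * copy just the 802.11 header (plus a few bytes, presumably to
+ * keep the IV linear) and attach the rest as a page fragment.
+ */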
+ copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+ frag = true_len - copy;
+
+ memcpy(skb_put(skb, copy), data, copy);
+ data += copy;
+
+ if (frag) {
+ skb_add_rx_frag(skb, 0, p, data - page_address(p),
+ frag, truesize);
+ get_page(p);
+ }
+
+ return skb;
+}
+
+static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
+ u32 seg_len, struct page *p, bool paged)
+{
+ struct sk_buff *skb;
+ struct mt7601u_rxwi *rxwi;
+ u32 fce_info, truesize = seg_len;
+
+ /* The DMA_INFO field at the beginning of the segment contains only
+ * part of the information; the rest must be read from the FCE
+ * descriptor at the end of the segment.
+ */
+ fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+ seg_len -= MT_FCE_INFO_LEN;
+
+ data += MT_DMA_HDR_LEN;
+ seg_len -= MT_DMA_HDR_LEN;
+
+ rxwi = (struct mt7601u_rxwi *) data;
+ data += sizeof(struct mt7601u_rxwi);
+ seg_len -= sizeof(struct mt7601u_rxwi);
+
+ if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
+ dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
+ if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
+ dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
+
+ trace_mt_rx(dev, rxwi, fce_info);
+
+ if (paged)
+ skb = mt7601u_rx_skb_from_seg_paged(dev, rxwi, data, seg_len,
+ truesize, p);
+ else
+ skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len);
+ if (!skb)
+ return;
+
+ ieee80211_rx_ni(dev->hw, skb);
+}
+
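+/* Return the total length of the next RX segment (including the DMA
+ * headers), or 0 if the buffer does not contain another valid segment.
+ */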
+static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
+{
+ u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+ sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
+ u16 dma_len = get_unaligned_le16(data);
+
+ if (data_len < min_seg_len ||
+ WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return 0;
+
+ return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
+{
+ u32 seg_len, data_len = e->urb->actual_length;
+ u8 *data = page_address(e->p);
+ struct page *new_p = NULL;
+ bool paged = true;
+ int cnt = 0;
+
+ if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
+ return;
+
+ /* Copy if there is very little data in the buffer. */
+ if (data_len < 512) {
+ paged = false;
+ } else {
+ new_p = dev_alloc_pages(MT_RX_ORDER);
+ if (!new_p)
+ paged = false;
+ }
+
+ while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
+ mt7601u_rx_process_seg(dev, data, seg_len, e->p, paged);
+
+ data_len -= seg_len;
+ data += seg_len;
+ cnt++;
+ }
+
+ if (cnt > 1)
+ trace_mt_rx_dma_aggr(dev, cnt, paged);
+
+ if (paged) {
+ /* we have one extra ref from the allocator */
+ __free_pages(e->p, MT_RX_ORDER);
+
+ e->p = new_p;
+ }
+}
+
+static struct mt7601u_dma_buf_rx *
+mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
+{
+ struct mt7601u_rx_queue *q = &dev->rx_q;
+ struct mt7601u_dma_buf_rx *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (!q->pending)
+ goto out;
+
+ buf = &q->e[q->start];
+ q->pending--;
+ q->start = (q->start + 1) % q->entries;
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+ return buf;
+}
+
+static void mt7601u_complete_rx(struct urb *urb)
+{
+ struct mt7601u_dev *dev = urb->context;
+ struct mt7601u_rx_queue *q = &dev->rx_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (mt7601u_urb_has_error(urb))
+ dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+ goto out;
+
+ q->end = (q->end + 1) % q->entries;
+ q->pending++;
+ tasklet_schedule(&dev->rx_tasklet);
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt7601u_rx_tasklet(unsigned long data)
+{
+ struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+ struct mt7601u_dma_buf_rx *e;
+
+ while ((e = mt7601u_rx_get_pending_entry(dev))) {
+ if (e->urb->status)
+ continue;
+
+ mt7601u_rx_process_entry(dev, e);
+ mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
+ }
+}
+
+static void mt7601u_complete_tx(struct urb *urb)
+{
+ struct mt7601u_tx_queue *q = urb->context;
+ struct mt7601u_dev *dev = q->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (mt7601u_urb_has_error(urb))
+ dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+ goto out;
+
+ skb = q->e[q->start].skb;
+ trace_mt_tx_dma_done(dev, skb);
+
+ mt7601u_tx_status(dev, skb);
+
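+ /* Wake the ieee80211 queue once this hw queue has drained an
+ * eighth below full - the queue is stopped when it fills up.
+ */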
+ if (q->used == q->entries - q->entries / 8)
+ ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
+
+ q->start = (q->start + 1) % q->entries;
+ q->used--;
+
+ if (urb->status)
+ goto out;
+
+ set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
+ if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
+ struct sk_buff *skb, u8 ep)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
+ struct mt7601u_dma_buf_tx *e;
+ struct mt7601u_tx_queue *q = &dev->tx_q[ep];
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (WARN_ON(q->entries <= q->used)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ e = &q->e[q->end];
+ e->skb = skb;
+ usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+ mt7601u_complete_tx, q);
+ ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+ if (ret) {
+ /* Special-handle ENODEV from TX urb submission because it will
+ * often be the first ENODEV we see after the device is removed.
+ */
+ if (ret == -ENODEV)
+ set_bit(MT7601U_STATE_REMOVED, &dev->state);
+ else
+ dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
+ ret);
+ goto out;
+ }
+
+ q->end = (q->end + 1) % q->entries;
+ q->used++;
+
+ if (q->used >= q->entries)
+ ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return ret;
+}
+
+/* Map hardware Q to USB endpoint number */
+static u8 q2ep(u8 qid)
+{
+ /* TODO: take management packets to queue 5 */
+ return qid + 1;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+ if (ep == 5)
+ return MT_QSEL_MGMT;
+ return MT_QSEL_EDCA;
+}
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q)
+{
+ u8 ep = q2ep(hw_q);
+ u32 dma_flags;
+ int ret;
+
+ dma_flags = MT_TXD_PKT_INFO_80211;
+ if (wcid->hw_key_idx == 0xff)
+ dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+ ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+ if (ret)
+ return ret;
+
+ ret = mt7601u_dma_submit_tx(dev, skb, ep);
+ if (ret) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt7601u_kill_rx(struct mt7601u_dev *dev)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
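+ /* Note: poisoning an in-flight urb forces its completion handler
+ * to run, which advances rx_q.end, so each iteration should hit
+ * the next queued urb.
+ */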
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ int next = dev->rx_q.end;
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+ usb_poison_urb(dev->rx_q.e[next].urb);
+ spin_lock_irqsave(&dev->rx_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+ struct mt7601u_dma_buf_rx *e, gfp_t gfp)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ u8 *buf = page_address(e->p);
+ unsigned pipe;
+ int ret;
+
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);
+
+ usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+ mt7601u_complete_rx, dev);
+
+ trace_mt_submit_urb(dev, e->urb);
+ ret = usb_submit_urb(e->urb, gfp);
+ if (ret)
+ dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);
+
+ return ret;
+}
+
+static int mt7601u_submit_rx(struct mt7601u_dev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt7601u_free_rx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+ usb_free_urb(dev->rx_q.e[i].urb);
+ }
+}
+
+static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+ dev->rx_q.dev = dev;
+ dev->rx_q.entries = N_RX_ENTRIES;
+
+ for (i = 0; i < N_RX_ENTRIES; i++) {
+ dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+ if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
+{
+ int i;
+
+ WARN_ON(q->used);
+
+ for (i = 0; i < q->entries; i++) {
+ usb_poison_urb(q->e[i].urb);
+ usb_free_urb(q->e[i].urb);
+ }
+}
+
+static void mt7601u_free_tx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ mt7601u_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
+ struct mt7601u_tx_queue *q)
+{
+ int i;
+
+ q->dev = dev;
+ q->entries = N_TX_ENTRIES;
+
+ for (i = 0; i < N_TX_ENTRIES; i++) {
+ q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!q->e[i].urb)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
+ sizeof(*dev->tx_q), GFP_KERNEL);
+ if (!dev->tx_q)
+ return -ENOMEM;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev)
+{
+ int ret = -ENOMEM;
+
+ tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
+
+ ret = mt7601u_alloc_tx(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_alloc_rx(dev);
+ if (ret)
+ goto err;
+
+ ret = mt7601u_submit_rx(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ mt7601u_dma_cleanup(dev);
+ return ret;
+}
+
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
+{
+ mt7601u_kill_rx(dev);
+
+ tasklet_kill(&dev->rx_tasklet);
+
+ mt7601u_free_rx(dev);
+ mt7601u_free_tx(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h
new file mode 100644
index 000000000000..978e8a90b87f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_DMA_H
+#define __MT7601U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#include "util.h"
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+enum mt76_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+enum mt76_info_type {
+ DMA_PACKET,
+ DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
+#define MT_TXD_PKT_INFO_80211 BIT(19)
+#define MT_TXD_PKT_INFO_TSO BIT(20)
+#define MT_TXD_PKT_INFO_CSO BIT(21)
+#define MT_TXD_PKT_INFO_WIV BIT(24)
+#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
+
+enum mt76_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_INFO_SEQ GENMASK(19, 16)
+#define MT_TXD_CMD_INFO_TYPE GENMASK(26, 20)
+
+static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
+ enum mt76_msg_port d_port,
+ enum mt76_info_type type, u32 flags)
+{
+ u32 info;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+
+ info = flags |
+ MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
+ MT76_SET(MT_TXD_INFO_TYPE, type);
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+ flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
+ return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR BIT(24)
+#define MT_RXD_INFO_QSEL GENMASK(26, 25)
+#define MT_RXD_INFO_PORT GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
+
+enum mt76_evt_type {
+ CMD_DONE,
+ CMD_ERROR,
+ CMD_RETRY,
+ EVENT_PWR_RSP,
+ EVENT_WOW_RSP,
+ EVENT_CARRIER_DETECT_RSP,
+ EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
new file mode 100644
index 000000000000..ce3837f270f0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt7601u.h"
+#include "eeprom.h"
+
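+/* A field value of 0xff means it was never programmed. */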
+static bool
+field_valid(u8 val)
+{
+ return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+ if (!field_valid(val))
+ return 0;
+
+ return val;
+}
+
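+/* Read one 16-byte block of the efuse into @data. */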
+static int
+mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
+ enum mt7601u_eeprom_access_modes mode)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+ MT76_SET(MT_EFUSE_CTRL_MODE, mode) |
+ MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ /* Parts of the EEPROM that are not in the usage map (0x80-0xc0,
+ * 0xf0) will not return valid data, but that's OK.
+ */
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int
+mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev)
+{
+ const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
+ u8 data[map_reads * 16];
+ int ret, i;
+ u32 start = 0, end = 0, cnt_free;
+
+ for (i = 0; i < map_reads; i++) {
+ ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+ data + i * 16, MT_EE_PHYSICAL_READ);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+ if (!data[i]) {
+ if (!start)
+ start = MT_EE_USAGE_MAP_START + i;
+ end = MT_EE_USAGE_MAP_START + i;
+ }
+ cnt_free = end - start + 1;
+
+ if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+ dev_err(dev->dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool
+mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+ return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static void
+mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+ if (!field_valid(nic_conf1 & 0xff))
+ nic_conf1 &= 0xff00;
+
+ dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) &&
+ !(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC);
+
+ if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+ dev_err(dev->dev,
+ "Error: this driver does not support HW RF ctrl\n");
+
+ if (!field_valid(nic_conf0 >> 8))
+ return;
+
+ if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ dev_err(dev->dev,
+ "Error: device has more than 1 RX/TX stream!\n");
+}
+
+static int
+mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom)
+{
+ const void *src = eeprom + MT_EE_MAC_ADDR;
+
+ ether_addr_copy(dev->macaddr, src);
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+ MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+ return 0;
+}
+
+static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev,
+ u8 *eeprom, u8 max_pwr)
+{
+ u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER];
+
+ if (trgt_pwr > max_pwr || !trgt_pwr) {
+ dev_warn(dev->dev, "Error: EEPROM trgt power invalid %hhx!\n",
+ trgt_pwr);
+ trgt_pwr = 0x20;
+ }
+
+ memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr));
+}
+
+static void
+mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u32 i, val;
+ u8 max_pwr;
+
+ val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
+ max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
+
+ if (mt7601u_has_tssi(dev, eeprom)) {
+ mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
+ return;
+ }
+
+ for (i = 0; i < 14; i++) {
+ s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]);
+
+ if (power > max_pwr || power < 0)
+ power = MT7601U_DEFAULT_TX_POWER;
+
+ dev->ee->chan_pwr[i] = power;
+ }
+}
+
+static void
+mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ /* Note: - region 31 is not valid for mt7601u (see rtmp_init.c)
+ * - comments in rtmp_def.h are incorrect (see rt_channel.c)
+ */
+ static const struct reg_channel_bounds chan_bounds[] = {
+ /* EEPROM country regions 0 - 7 */
+ { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
+ { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
+ /* EEPROM country regions 32 - 33 */
+ { 1, 11 }, { 1, 14 }
+ };
+ u8 val = eeprom[MT_EE_COUNTRY_REGION];
+ int idx = -1;
+
+ if (val < 8)
+ idx = val;
+ if (val > 31 && val < 33)
+ idx = val - 32 + 8;
+
+ if (idx != -1)
+ dev_info(dev->dev,
+ "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+ val, chan_bounds[idx].start,
+ chan_bounds[idx].start + chan_bounds[idx].num - 1);
+ else
+ idx = 5; /* channels 1 - 14 */
+
+ dev->ee->reg = chan_bounds[idx];
+
+ /* TODO: country region 33 is special - phy should be set to B-mode
+ * before entering channel 14 (see sta/connect.c)
+ */
+}
+
+static void
+mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u8 comp;
+
+ dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+ comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
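+ /* BIT(7) of the compensation byte selects the sign; the low
+ * seven bits hold the magnitude.
+ */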
+ if (comp & BIT(7))
+ dev->ee->rf_freq_off -= comp & 0x7f;
+ else
+ dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ int i;
+ s8 *rssi_offset = dev->ee->rssi_offset;
+
+ for (i = 0; i < 2; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+}
+
+static void
+mt7601u_extra_power_over_mac(struct mt7601u_dev *dev)
+{
+ u32 val;
+
+ val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8);
+ val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8);
+ mt7601u_wr(dev, MT_TX_PWR_CFG_7, val);
+
+ val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+ mt7601u_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value)
+{
+ rate->raw = s6_validate(value);
+ rate->bw20 = s6_to_int(value);
+ /* Note: vendor driver does cap the value to s6 right away */
+ rate->bw40 = rate->bw20 + delta;
+}
+
+static void
+mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i)
+{
+ struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+ switch (i) {
+ case 0:
+ mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff);
+ mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff);
+ /* Save cck bw20 for fixups of channel 14 */
+ dev->ee->real_cck_bw20[0] = t->cck[0].bw20;
+ dev->ee->real_cck_bw20[1] = t->cck[1].bw20;
+
+ mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff);
+ mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff);
+ break;
+ case 1:
+ mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff);
+ mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff);
+ mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff);
+ mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff);
+ break;
+ case 2:
+ mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff);
+ mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff);
+ break;
+ }
+}
+
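+/* Decode the BW40 power delta: BIT(7) enables the delta, BIT(6)
+ * makes it negative, and the low five bits hold the magnitude
+ * (capped at 8).
+ */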
+static s8
+get_delta(u8 val)
+{
+ s8 ret;
+
+ if (!field_valid(val) || !(val & BIT(7)))
+ return 0;
+
+ ret = val & 0x1f;
+ if (ret > 8)
+ ret = 8;
+ if (val & BIT(6))
+ ret = -ret;
+
+ return ret;
+}
+
+static void
+mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u32 val;
+ s8 bw40_delta;
+ int i;
+
+ bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+
+ for (i = 0; i < 5; i++) {
+ val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+ mt7601u_save_power_rate(dev, bw40_delta, val, i);
+
+ if (~val)
+ mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val);
+ }
+
+ mt7601u_extra_power_over_mac(dev);
+}
+
+static void
+mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ struct tssi_data *d = &dev->ee->tssi_data;
+
+ if (!dev->ee->tssi_enabled)
+ return;
+
+ d->slope = eeprom[MT_EE_TX_TSSI_SLOPE];
+ d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024;
+ d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP];
+ d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1];
+ d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2];
+}
+
+int
+mt7601u_eeprom_init(struct mt7601u_dev *dev)
+{
+ u8 *eeprom;
+ int i, ret;
+
+ ret = mt7601u_efuse_physical_size_check(dev);
+ if (ret)
+ return ret;
+
+ dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL);
+ if (!dev->ee)
+ return -ENOMEM;
+
+ eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) {
+ ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+ if (ret)
+ goto out;
+ }
+
+ if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER)
+ dev_warn(dev->dev,
+ "Warning: unsupported EEPROM version %02hhx\n",
+ eeprom[MT_EE_VERSION_EE]);
+ dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+ eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+ mt7601u_set_macaddr(dev, eeprom);
+ mt7601u_set_chip_cap(dev, eeprom);
+ mt7601u_set_channel_power(dev, eeprom);
+ mt7601u_set_country_reg(dev, eeprom);
+ mt7601u_set_rf_freq_off(dev, eeprom);
+ mt7601u_set_rssi_offset(dev, eeprom);
+ dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP];
+ dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN];
+
+ mt7601u_config_tx_power_per_rate(dev, eeprom);
+
+ mt7601u_init_tssi_params(dev, eeprom);
+out:
+ kfree(eeprom);
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
new file mode 100644
index 000000000000..662d12703b69
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_EEPROM_H
+#define __MT7601U_EEPROM_H
+
+struct mt7601u_dev;
+
+#define MT7601U_EE_MAX_VER 0x0c
+#define MT7601U_EEPROM_SIZE 256
+
+#define MT7601U_DEFAULT_TX_POWER 6
+
+enum mt76_eeprom_field {
+ MT_EE_CHIP_ID = 0x00,
+ MT_EE_VERSION_FAE = 0x02,
+ MT_EE_VERSION_EE = 0x03,
+ MT_EE_MAC_ADDR = 0x04,
+ MT_EE_NIC_CONF_0 = 0x34,
+ MT_EE_NIC_CONF_1 = 0x36,
+ MT_EE_COUNTRY_REGION = 0x39,
+ MT_EE_FREQ_OFFSET = 0x3a,
+ MT_EE_NIC_CONF_2 = 0x42,
+
+ MT_EE_LNA_GAIN = 0x44,
+ MT_EE_RSSI_OFFSET = 0x46,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x50,
+ MT_EE_TX_POWER_OFFSET = 0x52,
+
+ MT_EE_TX_TSSI_SLOPE = 0x6e,
+ MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
+ MT_EE_TX_TSSI_OFFSET = 0x76,
+
+ MT_EE_TX_TSSI_TARGET_POWER = 0xd0,
+ MT_EE_REF_TEMP = 0xd1,
+ MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
+ MT_EE_TX_POWER_BYRATE_BASE = 0xde,
+
+ MT_EE_USAGE_MAP_START = 0x1e0,
+ MT_EE_USAGE_MAP_END = 0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
+ (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
+ MT_EE_USAGE_MAP_START + 1)
+
+enum mt7601u_eeprom_access_modes {
+ MT_EE_READ = 0,
+ MT_EE_PHYSICAL_READ = 1,
+};
+
+struct power_per_rate {
+ u8 raw; /* validated s6 value */
+ s8 bw20; /* sign-extended int */
+ s8 bw40; /* sign-extended int */
+};
+
+/* Power per rate - one value per two rates */
+struct mt7601u_rate_power {
+ struct power_per_rate cck[2];
+ struct power_per_rate ofdm[4];
+ struct power_per_rate ht[4];
+};
+
+struct reg_channel_bounds {
+ u8 start;
+ u8 num;
+};
+
+struct mt7601u_eeprom_params {
+ bool tssi_enabled;
+ u8 rf_freq_off;
+ s8 rssi_offset[2];
+ s8 ref_temp;
+ s8 lna_gain;
+
+ u8 chan_pwr[14];
+ struct mt7601u_rate_power power_rate_table;
+ s8 real_cck_bw20[2];
+
+ /* TSSI data - only used with internal TX ALC */
+ struct tssi_data {
+ int tx0_delta_offset;
+ u8 slope;
+ u8 offset[3];
+ } tssi_data;
+
+ struct reg_channel_bounds reg;
+};
+
+int mt7601u_eeprom_init(struct mt7601u_dev *dev);
+
+static inline u32 s6_validate(u32 reg)
+{
+ WARN_ON(reg & ~GENMASK(5, 0));
+ return reg & GENMASK(5, 0);
+}
+
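+/* Sign-extend a 6-bit two's-complement value to a native int. */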
+static inline int s6_to_int(u32 reg)
+{
+ int s6;
+
+ s6 = s6_validate(reg);
+ if (s6 & BIT(5))
+ s6 -= BIT(6);
+
+ return s6;
+}
+
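+/* Clamp to the signed 6-bit range [-32, 31] and encode as two's
+ * complement.
+ */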
+static inline u32 int_to_s6(int val)
+{
+ if (val < -0x20)
+ return 0x20;
+ if (val > 0x1f)
+ return 0x1f;
+
+ return val & 0x3f;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
new file mode 100644
index 000000000000..1fc86e865c8c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -0,0 +1,625 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+
+#include "initvals.h"
+
+static void
+mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable)
+{
+ int i;
+
+ /* Note: we don't turn off WLAN_CLK because that makes the device
+ * not respond properly on the probe path.
+ * In case anyone (PSM?) wants to use this function we can
+ * bring the clock stuff back and fix up the probe path.
+ */
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+ mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ if (enable) {
+ set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+ } else {
+ clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+ return;
+ }
+
+ for (i = 200; i; i--) {
+ val = mt7601u_rr(dev, MT_CMB_CTRL);
+
+ if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+ break;
+
+ udelay(20);
+ }
+
+ /* Note: the vendor driver tries to disable/enable WLAN here and
+ * retry, but the code which does it is so buggy it must never
+ * have triggered, so don't bother.
+ */
+ if (!i)
+ dev_err(dev->dev, "Error: PLL and XTAL check failed!\n");
+}
+
+static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset)
+{
+ u32 val;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+
+ val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (reset) {
+ val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ }
+ }
+
+ mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt7601u_set_wlan_state(dev, val, enable);
+
+ mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev)
+{
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP));
+ mt7601u_wr(dev, MT_USB_DMA_CFG, 0);
+ msleep(1);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+}
+
+static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
+{
+ u32 val;
+
+ val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+ MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+ if (dev->in_max_packet == 512)
+ val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
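+ /* Pulse the UDMA RX drop bit once after enabling bulk DMA,
+ * presumably to flush any stale RX data.
+ */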
+ val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+static int mt7601u_init_bbp(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ ret = mt7601u_wait_bbp_ready(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals,
+ ARRAY_SIZE(bbp_common_vals));
+ if (ret)
+ return ret;
+
+ return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals,
+ ARRAY_SIZE(bbp_chip_vals));
+}
+
+static void
+mt76_init_beacon_offsets(struct mt7601u_dev *dev)
+{
+ u16 base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
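+ /* Each MT_BCN_OFFSET register packs four beacon offsets, each
+ * expressed in 64-byte units relative to the beacon base.
+ */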
+ for (i = 0; i < 16; i++) {
+ u16 addr = dev->beacon_offsets[i];
+
+ regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals,
+ ARRAY_SIZE(mac_common_vals));
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN,
+ mac_chip_vals, ARRAY_SIZE(mac_chip_vals));
+ if (ret)
+ return ret;
+
+ mt76_init_beacon_offsets(dev);
+
+ mt7601u_wr(dev, MT_AUX_CLK_CFG, 0);
+
+ return 0;
+}
+
+static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
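+ /* Initialize every WCID entry to the all-ones MAC address,
+ * i.e. mark it unused.
+ */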
+ for (i = 0; i < N_WCIDS; i++) {
+ vals[i * 2] = 0xffffffff;
+ vals[i * 2 + 1] = 0x00ffffff;
+ }
+
+ ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static int mt7601u_init_key_mem(struct mt7601u_dev *dev)
+{
+ u32 vals[4] = {};
+
+ return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+ vals, ARRAY_SIZE(vals));
+}
+
+static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS * 2; i++)
+ vals[i] = 1;
+
+ ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static void mt7601u_reset_counters(struct mt7601u_dev *dev)
+{
+ mt7601u_rr(dev, MT_RX_STA_CNT0);
+ mt7601u_rr(dev, MT_RX_STA_CNT1);
+ mt7601u_rr(dev, MT_RX_STA_CNT2);
+ mt7601u_rr(dev, MT_TX_STA_CNT0);
+ mt7601u_rr(dev, MT_TX_STA_CNT1);
+ mt7601u_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt7601u_mac_start(struct mt7601u_dev *dev)
+{
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+ return -ETIMEDOUT;
+
+ dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+ MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+ MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+ MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+ MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+ mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev)
+{
+ int i, ok;
+
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+ dev_warn(dev->dev, "Warning: TX DMA did not stop!\n");
+
+ /* Page count on TxQ */
+ i = 200;
+ while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+ (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+ msleep(10);
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+ dev_warn(dev->dev, "Warning: MAC TX did not stop!\n");
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Page count on RxQ */
+ ok = 0;
+ i = 200;
+ while (i--) {
+ if ((mt76_rr(dev, 0x0430) & 0x00ff0000) ||
+ (mt76_rr(dev, 0x0a30) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a34) & 0xffffffff))
+ ok++;
+ if (ok > 6)
+ break;
+
+ msleep(1);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+ dev_warn(dev->dev, "Warning: MAC RX did not stop!\n");
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+ dev_warn(dev->dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt7601u_mac_stop(struct mt7601u_dev *dev)
+{
+ mt7601u_mac_stop_hw(dev);
+ flush_delayed_work(&dev->stat_work);
+ cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt7601u_stop_hardware(struct mt7601u_dev *dev)
+{
+ mt7601u_chip_onoff(dev, false, false);
+}
+
+int mt7601u_init_hardware(struct mt7601u_dev *dev)
+{
+ static const u16 beacon_offsets[16] = {
+ /* 512 bytes per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ int ret;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt7601u_chip_onoff(dev, true, false);
+
+ ret = mt7601u_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_mcu_init(dev);
+ if (ret)
+ goto err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Wait for ASIC ready after FW load. */
+ ret = mt7601u_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ mt7601u_reset_csr_bbp(dev);
+ mt7601u_init_usb_dma(dev);
+
+ ret = mt7601u_mcu_cmd_init(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_dma_init(dev);
+ if (ret)
+ goto err_mcu;
+ ret = mt7601u_write_mac_initvals(dev);
+ if (ret)
+ goto err_rx;
+
+ if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) {
+ ret = -EIO;
+ goto err_rx;
+ }
+
+ ret = mt7601u_init_bbp(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt7601u_init_wcid_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt7601u_init_key_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt7601u_init_wcid_attr_mem(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+
+ mt7601u_reset_counters(dev);
+
+ mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+ mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) |
+ MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+ ret = mt7601u_eeprom_init(dev);
+ if (ret)
+ goto err_rx;
+
+ ret = mt7601u_phy_init(dev);
+ if (ret)
+ goto err_rx;
+
+ mt7601u_set_rx_path(dev, 0);
+ mt7601u_set_tx_dac(dev, 0);
+
+ mt7601u_mac_set_ctrlch(dev, false);
+ mt7601u_bbp_set_ctrlch(dev, false);
+ mt7601u_bbp_set_bw(dev, MT_BW_20);
+
+ return 0;
+
+err_rx:
+ mt7601u_dma_cleanup(dev);
+err_mcu:
+ mt7601u_mcu_cmd_deinit(dev);
+err:
+ mt7601u_chip_onoff(dev, false, false);
+ return ret;
+}
+
+void mt7601u_cleanup(struct mt7601u_dev *dev)
+{
+ mt7601u_stop_hardware(dev);
+ mt7601u_dma_cleanup(dev);
+ mt7601u_mcu_cmd_deinit(dev);
+}
+
+struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct mt7601u_dev *dev;
+
+ hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->dev = pdev;
+ dev->hw = hw;
+ mutex_init(&dev->vendor_req_mutex);
+ mutex_init(&dev->reg_atomic_mutex);
+ mutex_init(&dev->hw_atomic_mutex);
+ mutex_init(&dev->mutex);
+ spin_lock_init(&dev->tx_lock);
+ spin_lock_init(&dev->rx_lock);
+ spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->con_mon_lock);
+ atomic_set(&dev->avg_ampdu_len, 1);
+
+ dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
+ if (!dev->stat_wq) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ return dev;
+}
+
+#define CHAN2G(_idx, _freq) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ void *chanlist;
+ int size;
+
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ ht_cap->mcs.rx_mask[0] = 0xff;
+ ht_cap->mcs.rx_mask[4] = 0x1;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+ dev->chandef.chan = &sband->channels[0];
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt7601u_dev *dev)
+{
+ dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
+ GFP_KERNEL);
+ dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g;
+
+ WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+ ARRAY_SIZE(mt76_channels_2ghz));
+
+ return mt76_init_sband(dev, dev->sband_2g,
+ &mt76_channels_2ghz[dev->ee->reg.start - 1],
+ dev->ee->reg.num,
+ mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+int mt7601u_register_device(struct mt7601u_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ /* Reserve WCID 0 for mcast - thanks to this the AP's WCID will go to
+ * entry no. 1 like it does in the vendor driver.
+ */
+ dev->wcid_mask[0] |= 1;
+
+ /* init fake wcid for monitor interfaces */
+ dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid),
+ GFP_KERNEL);
+ if (!dev->mon_wcid)
+ return -ENOMEM;
+ dev->mon_wcid->idx = 0xff;
+ dev->mon_wcid->hw_key_idx = -1;
+
+ SET_IEEE80211_DEV(hw, dev->dev);
+
+ hw->queues = 4;
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_SUPPORTS_RC_TABLE;
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+
+ hw->sta_data_size = sizeof(struct mt76_sta);
+ hw->vif_data_size = sizeof(struct mt76_vif);
+
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ ret = mt76_init_sband_2g(dev);
+ if (ret)
+ return ret;
+
+ INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work);
+ INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ mt7601u_init_debugfs(dev);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals.h b/drivers/net/wireless/mediatek/mt7601u/initvals.h
new file mode 100644
index 000000000000..ec11ff66969d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/initvals.h
@@ -0,0 +1,164 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_INITVALS_H
+#define __MT7601U_INITVALS_H
+
+static const struct mt76_reg_pair bbp_common_vals[] = {
+ { 65, 0x2c },
+ { 66, 0x38 },
+ { 68, 0x0b },
+ { 69, 0x12 },
+ { 70, 0x0a },
+ { 73, 0x10 },
+ { 81, 0x37 },
+ { 82, 0x62 },
+ { 83, 0x6a },
+ { 84, 0x99 },
+ { 86, 0x00 },
+ { 91, 0x04 },
+ { 92, 0x00 },
+ { 103, 0x00 },
+ { 105, 0x05 },
+ { 106, 0x35 },
+};
+
+static const struct mt76_reg_pair bbp_chip_vals[] = {
+ { 1, 0x04 }, { 4, 0x40 }, { 20, 0x06 }, { 31, 0x08 },
+ /* CCK Tx Control */
+ { 178, 0xff },
+ /* AGC/Sync controls */
+ { 66, 0x14 }, { 68, 0x8b }, { 69, 0x12 }, { 70, 0x09 },
+ { 73, 0x11 }, { 75, 0x60 }, { 76, 0x44 }, { 84, 0x9a },
+ { 86, 0x38 }, { 91, 0x07 }, { 92, 0x02 },
+ /* Rx Path Controls */
+ { 99, 0x50 }, { 101, 0x00 }, { 103, 0xc0 }, { 104, 0x92 },
+ { 105, 0x3c }, { 106, 0x03 }, { 128, 0x12 },
+ /* Change RXWI content: Gain Report */
+ { 142, 0x04 }, { 143, 0x37 },
+ /* Change RXWI content: Antenna Report */
+ { 142, 0x03 }, { 143, 0x99 },
+ /* Calibration Index Register */
+ /* CCK Receiver Control */
+ { 160, 0xeb }, { 161, 0xc4 }, { 162, 0x77 }, { 163, 0xf9 },
+ { 164, 0x88 }, { 165, 0x80 }, { 166, 0xff }, { 167, 0xe4 },
+ /* Added AGC controls - these AGC/GLRT registers are accessed
+ * through R195 and R196.
+ */
+ { 195, 0x00 }, { 196, 0x00 },
+ { 195, 0x01 }, { 196, 0x04 },
+ { 195, 0x02 }, { 196, 0x20 },
+ { 195, 0x03 }, { 196, 0x0a },
+ { 195, 0x06 }, { 196, 0x16 },
+ { 195, 0x07 }, { 196, 0x05 },
+ { 195, 0x08 }, { 196, 0x37 },
+ { 195, 0x0a }, { 196, 0x15 },
+ { 195, 0x0b }, { 196, 0x17 },
+ { 195, 0x0c }, { 196, 0x06 },
+ { 195, 0x0d }, { 196, 0x09 },
+ { 195, 0x0e }, { 196, 0x05 },
+ { 195, 0x0f }, { 196, 0x09 },
+ { 195, 0x10 }, { 196, 0x20 },
+ { 195, 0x20 }, { 196, 0x17 },
+ { 195, 0x21 }, { 196, 0x06 },
+ { 195, 0x22 }, { 196, 0x09 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x25 }, { 196, 0x09 },
+ { 195, 0x26 }, { 196, 0x17 },
+ { 195, 0x27 }, { 196, 0x06 },
+ { 195, 0x28 }, { 196, 0x09 },
+ { 195, 0x29 }, { 196, 0x05 },
+ { 195, 0x2a }, { 196, 0x09 },
+ { 195, 0x80 }, { 196, 0x8b },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x82 }, { 196, 0x09 },
+ { 195, 0x83 }, { 196, 0x17 },
+ { 195, 0x84 }, { 196, 0x11 },
+ { 195, 0x85 }, { 196, 0x00 },
+ { 195, 0x86 }, { 196, 0x00 },
+ { 195, 0x87 }, { 196, 0x18 },
+ { 195, 0x88 }, { 196, 0x60 },
+ { 195, 0x89 }, { 196, 0x44 },
+ { 195, 0x8a }, { 196, 0x8b },
+ { 195, 0x8b }, { 196, 0x8b },
+ { 195, 0x8c }, { 196, 0x8b },
+ { 195, 0x8d }, { 196, 0x8b },
+ { 195, 0x8e }, { 196, 0x09 },
+ { 195, 0x8f }, { 196, 0x09 },
+ { 195, 0x90 }, { 196, 0x09 },
+ { 195, 0x91 }, { 196, 0x09 },
+ { 195, 0x92 }, { 196, 0x11 },
+ { 195, 0x93 }, { 196, 0x11 },
+ { 195, 0x94 }, { 196, 0x11 },
+ { 195, 0x95 }, { 196, 0x11 },
+ /* PPAD */
+ { 47, 0x80 }, { 60, 0x80 }, { 150, 0xd2 }, { 151, 0x32 },
+ { 152, 0x23 }, { 153, 0x41 }, { 154, 0x00 }, { 155, 0x4f },
+ { 253, 0x7e }, { 195, 0x30 }, { 196, 0x32 }, { 195, 0x31 },
+ { 196, 0x23 }, { 195, 0x32 }, { 196, 0x45 }, { 195, 0x35 },
+ { 196, 0x4a }, { 195, 0x36 }, { 196, 0x5a }, { 195, 0x37 },
+ { 196, 0x5a },
+};
+
+static const struct mt76_reg_pair mac_common_vals[] = {
+ { MT_LEGACY_BASIC_RATE, 0x0000013f },
+ { MT_HT_BASIC_RATE, 0x00008003 },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
+ { MT_RX_FILTR_CFG, 0x00017f97 },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TX_SW_CFG0, 0x00000000 },
+ { MT_TX_SW_CFG1, 0x00080606 },
+ { MT_TX_LINK_CFG, 0x00001020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2090 },
+ { MT_MAX_LEN_CFG, 0x00003fff },
+ { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
+ { MT_PBF_RX_MAX_PCNT, 0x0000009f },
+ { MT_TX_RETRY_CFG, 0x47d01f0f },
+ { MT_AUTO_RSP_CFG, 0x00000013 },
+ { MT_CCK_PROT_CFG, 0x05740003 },
+ { MT_OFDM_PROT_CFG, 0x05740003 },
+ { MT_MM40_PROT_CFG, 0x03f44084 },
+ { MT_GF20_PROT_CFG, 0x01744004 },
+ { MT_GF40_PROT_CFG, 0x03f44084 },
+ { MT_MM20_PROT_CFG, 0x01744004 },
+ { MT_TXOP_CTRL_CFG, 0x0000583f },
+ { MT_TX_RTS_CFG, 0x01092b20 },
+ { MT_EXP_ACK_TIME, 0x002400ca },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { MT_XIFS_TIME_CFG, 0x33a41010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+};
+
+static const struct mt76_reg_pair mac_chip_vals[] = {
+ { MT_TSO_CTRL, 0x00006050 },
+ { MT_BCN_OFFSET(0), 0x18100800 },
+ { MT_BCN_OFFSET(1), 0x38302820 },
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x7f723c1f },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x00000000 },
+ { MT_TX0_RF_GAIN_CORR, 0x003b0005 },
+ { MT_TX0_RF_GAIN_ATTEN, 0x00006900 },
+ { MT_TX0_BB_GAIN_ATTEN, 0x00000400 },
+ { MT_TX_ALC_VGA3, 0x00060006 },
+ { MT_TX_SW_CFG0, 0x00000402 },
+ { MT_TX_SW_CFG1, 0x00000000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_FCE_CSO, 0x0000030f },
+ { MT_FCE_PARAMETERS, 0x00256f0f },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
new file mode 100644
index 000000000000..a2bdc3e322bf
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
@@ -0,0 +1,291 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_PHY_INITVALS_H
+#define __MT7601U_PHY_INITVALS_H
+
+#define RF_REG_PAIR(bank, reg, value) \
+ { MT_MCU_MEMMAP_RF | (bank) << 16 | (reg), value }
+
+static const struct mt76_reg_pair rf_central[] = {
+ /* Bank 0 - for central blocks: BG, PLL, XTAL, LO, ADC/DAC */
+ RF_REG_PAIR(0, 0, 0x02),
+ RF_REG_PAIR(0, 1, 0x01),
+ RF_REG_PAIR(0, 2, 0x11),
+ RF_REG_PAIR(0, 3, 0xff),
+ RF_REG_PAIR(0, 4, 0x0a),
+ RF_REG_PAIR(0, 5, 0x20),
+ RF_REG_PAIR(0, 6, 0x00),
+ /* B/G */
+ RF_REG_PAIR(0, 7, 0x00),
+ RF_REG_PAIR(0, 8, 0x00),
+ RF_REG_PAIR(0, 9, 0x00),
+ RF_REG_PAIR(0, 10, 0x00),
+ RF_REG_PAIR(0, 11, 0x21),
+ /* XO */
+ RF_REG_PAIR(0, 13, 0x00), /* 40mhz xtal */
+ /* RF_REG_PAIR(0, 13, 0x13), */ /* 20mhz xtal */
+ RF_REG_PAIR(0, 14, 0x7c),
+ RF_REG_PAIR(0, 15, 0x22),
+ RF_REG_PAIR(0, 16, 0x80),
+ /* PLL */
+ RF_REG_PAIR(0, 17, 0x99),
+ RF_REG_PAIR(0, 18, 0x99),
+ RF_REG_PAIR(0, 19, 0x09),
+ RF_REG_PAIR(0, 20, 0x50),
+ RF_REG_PAIR(0, 21, 0xb0),
+ RF_REG_PAIR(0, 22, 0x00),
+ RF_REG_PAIR(0, 23, 0xc5),
+ RF_REG_PAIR(0, 24, 0xfc),
+ RF_REG_PAIR(0, 25, 0x40),
+ RF_REG_PAIR(0, 26, 0x4d),
+ RF_REG_PAIR(0, 27, 0x02),
+ RF_REG_PAIR(0, 28, 0x72),
+ RF_REG_PAIR(0, 29, 0x01),
+ RF_REG_PAIR(0, 30, 0x00),
+ RF_REG_PAIR(0, 31, 0x00),
+ /* test ports */
+ RF_REG_PAIR(0, 32, 0x00),
+ RF_REG_PAIR(0, 33, 0x00),
+ RF_REG_PAIR(0, 34, 0x23),
+ RF_REG_PAIR(0, 35, 0x01), /* change setting to reduce spurs */
+ RF_REG_PAIR(0, 36, 0x00),
+ RF_REG_PAIR(0, 37, 0x00),
+ /* ADC/DAC */
+ RF_REG_PAIR(0, 38, 0x00),
+ RF_REG_PAIR(0, 39, 0x20),
+ RF_REG_PAIR(0, 40, 0x00),
+ RF_REG_PAIR(0, 41, 0xd0),
+ RF_REG_PAIR(0, 42, 0x1b),
+ RF_REG_PAIR(0, 43, 0x02),
+ RF_REG_PAIR(0, 44, 0x00),
+};
+
+static const struct mt76_reg_pair rf_channel[] = {
+ RF_REG_PAIR(4, 0, 0x01),
+ RF_REG_PAIR(4, 1, 0x00),
+ RF_REG_PAIR(4, 2, 0x00),
+ RF_REG_PAIR(4, 3, 0x00),
+ /* LDO */
+ RF_REG_PAIR(4, 4, 0x00),
+ RF_REG_PAIR(4, 5, 0x08),
+ RF_REG_PAIR(4, 6, 0x00),
+ /* RX */
+ RF_REG_PAIR(4, 7, 0x5b),
+ RF_REG_PAIR(4, 8, 0x52),
+ RF_REG_PAIR(4, 9, 0xb6),
+ RF_REG_PAIR(4, 10, 0x57),
+ RF_REG_PAIR(4, 11, 0x33),
+ RF_REG_PAIR(4, 12, 0x22),
+ RF_REG_PAIR(4, 13, 0x3d),
+ RF_REG_PAIR(4, 14, 0x3e),
+ RF_REG_PAIR(4, 15, 0x13),
+ RF_REG_PAIR(4, 16, 0x22),
+ RF_REG_PAIR(4, 17, 0x23),
+ RF_REG_PAIR(4, 18, 0x02),
+ RF_REG_PAIR(4, 19, 0xa4),
+ RF_REG_PAIR(4, 20, 0x01),
+ RF_REG_PAIR(4, 21, 0x12),
+ RF_REG_PAIR(4, 22, 0x80),
+ RF_REG_PAIR(4, 23, 0xb3),
+ RF_REG_PAIR(4, 24, 0x00), /* reserved */
+ RF_REG_PAIR(4, 25, 0x00), /* reserved */
+ RF_REG_PAIR(4, 26, 0x00), /* reserved */
+ RF_REG_PAIR(4, 27, 0x00), /* reserved */
+ /* LOGEN */
+ RF_REG_PAIR(4, 28, 0x18),
+ RF_REG_PAIR(4, 29, 0xee),
+ RF_REG_PAIR(4, 30, 0x6b),
+ RF_REG_PAIR(4, 31, 0x31),
+ RF_REG_PAIR(4, 32, 0x5d),
+ RF_REG_PAIR(4, 33, 0x00), /* reserved */
+ /* TX */
+ RF_REG_PAIR(4, 34, 0x96),
+ RF_REG_PAIR(4, 35, 0x55),
+ RF_REG_PAIR(4, 36, 0x08),
+ RF_REG_PAIR(4, 37, 0xbb),
+ RF_REG_PAIR(4, 38, 0xb3),
+ RF_REG_PAIR(4, 39, 0xb3),
+ RF_REG_PAIR(4, 40, 0x03),
+ RF_REG_PAIR(4, 41, 0x00), /* reserved */
+ RF_REG_PAIR(4, 42, 0x00), /* reserved */
+ RF_REG_PAIR(4, 43, 0xc5),
+ RF_REG_PAIR(4, 44, 0xc5),
+ RF_REG_PAIR(4, 45, 0xc5),
+ RF_REG_PAIR(4, 46, 0x07),
+ RF_REG_PAIR(4, 47, 0xa8),
+ RF_REG_PAIR(4, 48, 0xef),
+ RF_REG_PAIR(4, 49, 0x1a),
+ /* PA */
+ RF_REG_PAIR(4, 54, 0x07),
+ RF_REG_PAIR(4, 55, 0xa7),
+ RF_REG_PAIR(4, 56, 0xcc),
+ RF_REG_PAIR(4, 57, 0x14),
+ RF_REG_PAIR(4, 58, 0x07),
+ RF_REG_PAIR(4, 59, 0xa8),
+ RF_REG_PAIR(4, 60, 0xd7),
+ RF_REG_PAIR(4, 61, 0x10),
+ RF_REG_PAIR(4, 62, 0x1c),
+ RF_REG_PAIR(4, 63, 0x00), /* reserved */
+};
+
+static const struct mt76_reg_pair rf_vga[] = {
+ RF_REG_PAIR(5, 0, 0x47),
+ RF_REG_PAIR(5, 1, 0x00),
+ RF_REG_PAIR(5, 2, 0x00),
+ RF_REG_PAIR(5, 3, 0x08),
+ RF_REG_PAIR(5, 4, 0x04),
+ RF_REG_PAIR(5, 5, 0x20),
+ RF_REG_PAIR(5, 6, 0x3a),
+ RF_REG_PAIR(5, 7, 0x3a),
+ RF_REG_PAIR(5, 8, 0x00),
+ RF_REG_PAIR(5, 9, 0x00),
+ RF_REG_PAIR(5, 10, 0x10),
+ RF_REG_PAIR(5, 11, 0x10),
+ RF_REG_PAIR(5, 12, 0x10),
+ RF_REG_PAIR(5, 13, 0x10),
+ RF_REG_PAIR(5, 14, 0x10),
+ RF_REG_PAIR(5, 15, 0x20),
+ RF_REG_PAIR(5, 16, 0x22),
+ RF_REG_PAIR(5, 17, 0x7c),
+ RF_REG_PAIR(5, 18, 0x00),
+ RF_REG_PAIR(5, 19, 0x00),
+ RF_REG_PAIR(5, 20, 0x00),
+ RF_REG_PAIR(5, 21, 0xf1),
+ RF_REG_PAIR(5, 22, 0x11),
+ RF_REG_PAIR(5, 23, 0x02),
+ RF_REG_PAIR(5, 24, 0x41),
+ RF_REG_PAIR(5, 25, 0x20),
+ RF_REG_PAIR(5, 26, 0x00),
+ RF_REG_PAIR(5, 27, 0xd7),
+ RF_REG_PAIR(5, 28, 0xa2),
+ RF_REG_PAIR(5, 29, 0x20),
+ RF_REG_PAIR(5, 30, 0x49),
+ RF_REG_PAIR(5, 31, 0x20),
+ RF_REG_PAIR(5, 32, 0x04),
+ RF_REG_PAIR(5, 33, 0xf1),
+ RF_REG_PAIR(5, 34, 0xa1),
+ RF_REG_PAIR(5, 35, 0x01),
+ RF_REG_PAIR(5, 41, 0x00),
+ RF_REG_PAIR(5, 42, 0x00),
+ RF_REG_PAIR(5, 43, 0x00),
+ RF_REG_PAIR(5, 44, 0x00),
+ RF_REG_PAIR(5, 45, 0x00),
+ RF_REG_PAIR(5, 46, 0x00),
+ RF_REG_PAIR(5, 47, 0x00),
+ RF_REG_PAIR(5, 48, 0x00),
+ RF_REG_PAIR(5, 49, 0x00),
+ RF_REG_PAIR(5, 50, 0x00),
+ RF_REG_PAIR(5, 51, 0x00),
+ RF_REG_PAIR(5, 52, 0x00),
+ RF_REG_PAIR(5, 53, 0x00),
+ RF_REG_PAIR(5, 54, 0x00),
+ RF_REG_PAIR(5, 55, 0x00),
+ RF_REG_PAIR(5, 56, 0x00),
+ RF_REG_PAIR(5, 57, 0x00),
+ RF_REG_PAIR(5, 58, 0x31),
+ RF_REG_PAIR(5, 59, 0x31),
+ RF_REG_PAIR(5, 60, 0x0a),
+ RF_REG_PAIR(5, 61, 0x02),
+ RF_REG_PAIR(5, 62, 0x00),
+ RF_REG_PAIR(5, 63, 0x00),
+};
+
+/* TODO: BBP178 is set to 0xff for "CCK CH14 OBW" which overrides the settings
+ * from channel switching. Seems stupid at best.
+ */
+static const struct mt76_reg_pair bbp_high_temp[] = {
+ { 75, 0x60 },
+ { 92, 0x02 },
+ { 178, 0xff }, /* For CCK CH14 OBW */
+ { 195, 0x88 }, { 196, 0x60 },
+}, bbp_high_temp_bw20[] = {
+ { 69, 0x12 },
+ { 91, 0x07 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x83 }, { 196, 0x17 },
+}, bbp_high_temp_bw40[] = {
+ { 69, 0x15 },
+ { 91, 0x04 },
+ { 195, 0x23 }, { 196, 0x12 },
+ { 195, 0x24 }, { 196, 0x08 },
+ { 195, 0x81 }, { 196, 0x15 },
+ { 195, 0x83 }, { 196, 0x16 },
+}, bbp_low_temp[] = {
+ { 178, 0xff }, /* For CCK CH14 OBW */
+}, bbp_low_temp_bw20[] = {
+ { 69, 0x12 },
+ { 75, 0x5e },
+ { 91, 0x07 },
+ { 92, 0x02 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x83 }, { 196, 0x17 },
+ { 195, 0x88 }, { 196, 0x5e },
+}, bbp_low_temp_bw40[] = {
+ { 69, 0x15 },
+ { 75, 0x5c },
+ { 91, 0x04 },
+ { 92, 0x03 },
+ { 195, 0x23 }, { 196, 0x10 },
+ { 195, 0x24 }, { 196, 0x08 },
+ { 195, 0x81 }, { 196, 0x15 },
+ { 195, 0x83 }, { 196, 0x16 },
+ { 195, 0x88 }, { 196, 0x5b },
+}, bbp_normal_temp[] = {
+ { 75, 0x60 },
+ { 92, 0x02 },
+ { 178, 0xff }, /* For CCK CH14 OBW */
+ { 195, 0x88 }, { 196, 0x60 },
+}, bbp_normal_temp_bw20[] = {
+ { 69, 0x12 },
+ { 91, 0x07 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x83 }, { 196, 0x17 },
+}, bbp_normal_temp_bw40[] = {
+ { 69, 0x15 },
+ { 91, 0x04 },
+ { 195, 0x23 }, { 196, 0x12 },
+ { 195, 0x24 }, { 196, 0x08 },
+ { 195, 0x81 }, { 196, 0x15 },
+ { 195, 0x83 }, { 196, 0x16 },
+};
+
+#define BBP_TABLE(arr) { arr, ARRAY_SIZE(arr), }
+
+static const struct reg_table {
+ const struct mt76_reg_pair *regs;
+ size_t n;
+} bbp_mode_table[3][3] = {
+ {
+ BBP_TABLE(bbp_normal_temp_bw20),
+ BBP_TABLE(bbp_normal_temp_bw40),
+ BBP_TABLE(bbp_normal_temp),
+ }, {
+ BBP_TABLE(bbp_high_temp_bw20),
+ BBP_TABLE(bbp_high_temp_bw40),
+ BBP_TABLE(bbp_high_temp),
+ }, {
+ BBP_TABLE(bbp_low_temp_bw20),
+ BBP_TABLE(bbp_low_temp_bw40),
+ BBP_TABLE(bbp_low_temp),
+ }
+};
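+
+/* A sketch of the intended lookup (inferred from the enum mt_temp_mode and
+ * enum mt_bw orderings in mt7601u.h, not spelled out in this header):
+ *
+ * const struct reg_table *t = &bbp_mode_table[dev->temp_mode][dev->bw];
+ * mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n);
+ *
+ * with the third column holding the bandwidth-independent pairs.
+ */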
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
new file mode 100644
index 000000000000..c161bcc6a7fa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
+{
+ u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ txrate->idx = idx + 4;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+ if (rate & MT_TXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
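+
+/* Note: a CCK rate index of 8..11 denotes the short-preamble variant of
+ * indices 0..3 (cf. the RX_FLAG_SHORTPRE handling in
+ * mt76_mac_process_rate()).
+ */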
+
+static void
+mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info,
+ struct mt76_tx_status *st)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
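+ /* The status FIFO only carries the final rate; reconstruct the earlier
+ * attempts by assuming the rate index dropped by one on each retry,
+ * stepping down from final idx + retry count.
+ */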
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76_mac_process_tx_rate(&rate[last_rate], st->rate);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + st->retry;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+
+ if (last_rate > 0)
+ rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = st->success;
+
+ if (st->is_probe)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->chandef.chan->band;
+ u16 val;
+
+ r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx);
+ rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy);
+ rateval |= MT76_SET(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return rateval;
+}
+
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
+{
+ struct mt76_tx_status stat = {};
+ u32 val;
+
+ val = mt7601u_rr(dev, MT_TX_STAT_FIFO);
+ stat.valid = !!(val & MT_TX_STAT_FIFO_VALID);
+ stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
+ stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
+ stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
+ stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
+ stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val);
+ stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val);
+
+ return stat;
+}
+
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ void *msta;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ mt76_mac_fill_tx_status(dev, &info, stat);
+ ieee80211_tx_status_noskb(dev->hw, sta, &info);
+ rcu_read_unlock();
+}
+
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ bool ht_rts[4] = {};
+ int i;
+
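+ /* prot[i] is written to MT_CCK_PROT_CFG + i * 4 below; the indexing
+ * assumes the consecutive protection registers are CCK, OFDM, MM20,
+ * MM40, GF20 and GF40 (note the BW20/ALL TXOP pairing of 2,4 vs 3,5).
+ */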
+ prot[0] = MT_PROT_NAV_SHORT |
+ MT_PROT_TXOP_ALLOW_ALL |
+ MT_PROT_RTS_THR_EN;
+ prot[1] = prot[0];
+ if (legacy_prot)
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+ prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+ if (legacy_prot) {
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+ } else {
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ ht_rts[1] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+ }
+
+ if (non_gf)
+ ht_rts[2] = ht_rts[3] = true;
+
+ for (i = 0; i < 4; i++)
+ if (ht_rts[i])
+ prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+ for (i = 0; i < 6; i++)
+ mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb)
+{
+ if (short_preamb)
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+ else
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
+{
+ u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG);
+
+ val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ if (!enable) {
+ mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+ return;
+ }
+
+ val &= ~MT_BEACON_TIME_CFG_INTVAL;
+ val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN;
+ mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+}
+
+static void mt7601u_check_mac_err(struct mt7601u_dev *dev)
+{
+ u32 val = mt7601u_rr(dev, 0x10f4);
+
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+
+ dev_err(dev->dev, "Error: MAC specific condition occurred\n");
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+ udelay(10);
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt7601u_mac_work(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ mac_work.work);
+ struct {
+ u32 addr_base;
+ u32 span;
+ u64 *stat_base;
+ } spans[] = {
+ { MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
+ { MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
+ { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
+ { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
+ { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
+ { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
+ };
+ u32 sum, n;
+ int i, j, k;
+
+ /* Note: using CMD_RANDOM_READ is actually slower than reading all the
+ * registers by hand. The MCU takes ca. 20ms to complete a read of 24
+ * registers while reading them one by one takes roughly
+ * 24 * 200us =~ 5ms.
+ */
+
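+ /* Each TX aggregation counter register packs two 16-bit counts: in the
+ * k-th register the low half counts AMPDUs of length 2k + 1 and the
+ * high half of length 2k + 2 - hence the weights in the sum below.
+ */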
+ k = 0;
+ n = 0;
+ sum = 0;
+ for (i = 0; i < ARRAY_SIZE(spans); i++)
+ for (j = 0; j < spans[i].span; j++) {
+ u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4);
+
+ spans[i].stat_base[j * 2] += val & 0xffff;
+ spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+ /* Calculate average AMPDU length */
+ if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+ spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+ continue;
+
+ n += (val >> 16) + (val & 0xffff);
+ sum += (val & 0xffff) * (1 + k * 2) +
+ (val >> 16) * (2 + k * 2);
+ k++;
+ }
+
+ atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+ mt7601u_check_mac_err(dev);
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ u8 zmac[ETH_ALEN] = {};
+ u32 attr;
+
+ attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ if (mac)
+ memcpy(zmac, mac, sizeof(zmac));
+
+ mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
+{
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ void *msta;
+ u8 min_factor = 3;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+ wcid = rcu_dereference(dev->wcid[i]);
+ if (!wcid)
+ continue;
+
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+ min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+ }
+ rcu_read_unlock();
+
+ mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+ MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+ u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate);
+
+ switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (WARN_ON(idx >= 8))
+ idx = 0;
+ idx += 4;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->flag |= RX_FLAG_SHORTPRE;
+ }
+
+ if (WARN_ON(idx >= 4))
+ idx = 0;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ status->flag |= RX_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->flag |= RX_FLAG_HT;
+ status->rate_idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->flag |= RX_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->flag |= 1 << RX_FLAG_STBC_SHIFT;
+
+ if (rate & MT_RXWI_RATE_BW)
+ status->flag |= RX_FLAG_40MHZ;
+}
+
+static void
+mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+ u16 rate, int rssi)
+{
+ dev->bcn_freq_off = rxwi->freq_off;
+ dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate);
+ dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+}
+
+static int
+mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt7601u_rxwi *rxwi = rxi;
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ int rssi;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+ }
+
+ status->chains = BIT(0);
+ rssi = mt7601u_phy_get_rssi(dev, rxwi, rate);
+ status->chain_signal[0] = status->signal = rssi;
+ status->freq = dev->chandef.chan->center_freq;
+ status->band = dev->chandef.chan->band;
+
+ mt76_mac_process_rate(status, rate);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ if (mt7601u_rx_is_our_beacon(dev, data))
+ mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi);
+ else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M))
+ dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ return MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_set_key(dev, idx);
+
+ mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP) {
+ /* Note: start with 1 to comply with the spec
+ * (see comment on common/cmm_wpa.c:4291).
+ */
+ iv_data[0] |= 1;
+ iv_data[3] |= 0x20;
+ }
+ }
+ mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
+ val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+ val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+ MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+ val &= ~MT_WCID_ATTR_PAIRWISE;
+ val |= MT_WCID_ATTR_PAIRWISE *
+ !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ mt7601u_wr(dev, MT_WCID_ATTR(idx), val);
+
+ return 0;
+}
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_set_shared_key(dev, vif_idx, key_idx);
+
+ mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+ key_data, sizeof(key_data));
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.h b/drivers/net/wireless/mediatek/mt7601u/mac.h
new file mode 100644
index 000000000000..2c22d63c63a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+struct mt76_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 is_probe:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+/* Note: the values in the original "RSSI" and "SNR" fields are not what
+ * their names suggest on MT7601U; the names used by this driver are
+ * educated guesses (see vendor mac/ral_omac.c).
+ */
+struct mt7601u_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 frag_sn;
+ __le16 rate;
+
+ u8 unknown;
+ u8 zero[3];
+
+ u8 snr;
+ u8 ant;
+ u8 gain;
+ u8 freq_off;
+
+ __le32 resv2;
+ __le32 expert_ant;
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_U2M BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211 BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
+#define MT_RXWI_CTL_TID GENMASK(31, 28)
+
+#define MT_RXWI_FRAG GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_MCS GENMASK(6, 0)
+#define MT_RXWI_RATE_BW BIT(7)
+#define MT_RXWI_RATE_SGI BIT(8)
+#define MT_RXWI_RATE_STBC GENMASK(10, 9)
+#define MT_RXWI_RATE_ETXBF BIT(11)
+#define MT_RXWI_RATE_SND BIT(12)
+#define MT_RXWI_RATE_ITXBF BIT(13)
+#define MT_RXWI_RATE_PHY GENMASK(15, 14)
+
+#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
+
+enum mt76_phy_type {
+ MT_PHY_TYPE_CCK,
+ MT_PHY_TYPE_OFDM,
+ MT_PHY_TYPE_HT,
+ MT_PHY_TYPE_HT_GF,
+};
+
+enum mt76_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+};
+
+struct mt76_txwi {
+ __le16 flags;
+ __le16 rate_ctl;
+
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+
+ __le32 iv;
+
+ __le32 eiv;
+
+ u8 aid;
+ u8 txstream;
+ __le16 ctl;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_RATE_MCS GENMASK(6, 0)
+#define MT_TXWI_RATE_BW BIT(7)
+#define MT_TXWI_RATE_SGI BIT(8)
+#define MT_TXWI_RATE_STBC GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
+#define MT_TXWI_LEN_PKTID GENMASK(15, 12)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
+#define MT_TXWI_CTL_PIFS_REV BIT(6)
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi);
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key);
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev);
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
new file mode 100644
index 000000000000..ced82abb414f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+#include <linux/version.h>
+
+static int mt7601u_start(struct ieee80211_hw *hw)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt7601u_mac_start(dev);
+ if (ret)
+ goto out;
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt7601u_stop(struct ieee80211_hw *hw)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt7601u_mac_stop(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int mt7601u_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int idx = 0;
+ unsigned int wcid = GROUP_WCID(idx);
+
+ /* Note: for AP do the AP-STA things mt76 does:
+ * - beacon offsets
+ * - do mac address tricks
+ * - shift vif idx
+ */
+ mvif->idx = idx;
+
+ if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG))
+ return -ENOSPC;
+ dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG);
+ mvif->group_wcid.idx = wcid;
+ mvif->group_wcid.hw_key_idx = -1;
+
+ return 0;
+}
+
+static void mt7601u_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int wcid = mvif->group_wcid.idx;
+
+ dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+}
+
+static int mt7601u_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
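+/* The FIF_* flags ask to *receive* a frame class while the MT_RX_FILTR_CFG
+ * bits *drop* it, hence the negation in the helper above.
+ */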
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt7601u_phy_con_cal_onoff(dev, info);
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+ /* Note: this is a hack - beacon_int is not reset when leaving a BSS,
+ * nor is any more suitable event generated. rt2x00 doesn't seem to
+ * be bothered though.
+ */
+ if (is_zero_ether_addr(info->bssid))
+ mt7601u_mac_config_tsf(dev, false, 0);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+ mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+ mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+ mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+ mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT)
+ mt7601u_mac_config_tsf(dev, true, info->beacon_int);
+
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt7601u_mac_set_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE)
+ mt7601u_mac_set_short_preamble(dev, info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt7601u_phy_recalibrate_after_assoc(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76_wcid_alloc(struct mt7601u_dev *dev)
+{
+ int i, idx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ idx = ffs(~dev->wcid_mask[i]);
+ if (!idx)
+ continue;
+
+ idx--;
+ dev->wcid_mask[i] |= BIT(idx);
+ break;
+ }
+
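+ /* Entries past 119 are left for the group WCIDs and the reserved slot
+ * (see the allocation map comment in mt7601u.h).
+ */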
+ idx = i * BITS_PER_LONG + idx;
+ if (idx > 119)
+ return -1;
+
+ return idx;
+}
+
+static int
+mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+ mt7601u_mac_set_ampdu_factor(dev);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static int
+mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+ mt7601u_mac_wcid_setup(dev, idx, 0, NULL);
+ mt7601u_mac_set_ampdu_factor(dev);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static void
+mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt7601u_sw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mt7601u_agc_save(dev);
+ set_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static void
+mt7601u_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mt7601u_agc_restore(dev);
+ clear_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static int
+mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+ struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+ return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+ WARN_ON(msta->wcid.idx > GROUP_WCID(0));
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ msta->agg_ssn[tid] = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates;
+ struct ieee80211_tx_rate rate = {};
+
+ rcu_read_lock();
+ rates = rcu_dereference(sta->rates);
+
+ if (!rates)
+ goto out;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+ rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt7601u_ops = {
+ .tx = mt7601u_tx,
+ .start = mt7601u_start,
+ .stop = mt7601u_stop,
+ .add_interface = mt7601u_add_interface,
+ .remove_interface = mt7601u_remove_interface,
+ .config = mt7601u_config,
+ .configure_filter = mt76_configure_filter,
+ .bss_info_changed = mt7601u_bss_info_changed,
+ .sta_add = mt7601u_sta_add,
+ .sta_remove = mt7601u_sta_remove,
+ .sta_notify = mt7601u_sta_notify,
+ .set_key = mt7601u_set_key,
+ .conf_tx = mt7601u_conf_tx,
+ .sw_scan_start = mt7601u_sw_scan,
+ .sw_scan_complete = mt7601u_sw_scan_complete,
+ .ampdu_action = mt76_ampdu_action,
+ .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+ .set_rts_threshold = mt7601u_set_rts_threshold,
+};
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
new file mode 100644
index 000000000000..fbb1986eda3c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -0,0 +1,534 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x3800
+#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
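+/* The extra 12 bytes: the 4-byte DMA descriptor plus the 8 bytes zeroed
+ * past the payload in __mt7601u_dma_fw().
+ */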
+#define MCU_RESP_URB_SIZE 1024
+
+static inline int firmware_running(struct mt7601u_dev *dev)
+{
+ return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+ put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
+ u8 seq, enum mcu_cmd cmd)
+{
+ WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+ MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) |
+ MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd)));
+}
+
+static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
+ struct sk_buff *skb, bool need_resp)
+{
+ u32 i, csum = 0;
+
+ for (i = 0; i < skb->len / 4; i++)
+ csum ^= get_unaligned_le32(skb->data + i * 4);
+
+ trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *
+mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ memcpy(skb_put(skb, len), data, len);
+
+ return skb;
+}
+
+static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
+{
+ struct urb *urb = dev->mcu.resp.urb;
+ u32 rxfce;
+ int urb_status, ret, i = 5;
+
+ while (i--) {
+ if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+ msecs_to_jiffies(300))) {
+ dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
+ continue;
+ }
+
+ /* Make copies of important data before reusing the urb */
+ rxfce = get_unaligned_le32(dev->mcu.resp.buf);
+ urb_status = urb->status * mt7601u_urb_has_error(urb);
+
+ ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt7601u_complete_urb,
+ &dev->mcu.resp_cmpl);
+ if (ret)
+ return ret;
+
+ if (urb_status)
+ dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
+ urb_status);
+
+ if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+ MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+ return 0;
+
+ dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n",
+ MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+ seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->dev, "Error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static int
+mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
+ dev->out_eps[MT_EP_OUT_INBAND_CMD]);
+ int sent, ret;
+ u8 seq = 0;
+
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0;
+
+ mutex_lock(&dev->mcu.mutex);
+
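+ /* Sequence number 0 is used for commands that don't expect a response,
+ * so the wrap-around below skips it.
+ */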
+ if (wait_resp)
+ while (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);
+
+ if (dev->mcu.resp_cmpl.done)
+ dev_err(dev->dev, "Error: MCU response pre-completed!\n");
+
+ trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
+ trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
+ ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+ if (ret) {
+ dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret);
+ goto out;
+ }
+ if (sent != skb->len)
+ dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__);
+
+ if (wait_resp)
+ ret = mt7601u_mcu_wait_resp(dev, seq);
+out:
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
+ enum mcu_function func, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ /* Wait for a response only for ATOMIC_TSSI_SETTING (5). */
+ return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
+}
+
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga)
+{
+ int ret;
+
+ if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state))
+ return 0;
+
+ ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING,
+ use_hvga);
+ if (ret) {
+ dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n");
+ return ret;
+ }
+
+ dev->tssi_read_trig = true;
+
+ return 0;
+}
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(cal),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+
+ return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
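+
+/* Illustrative use - pushing one of the initvals tables, e.g.:
+ *
+ * mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals,
+ * ARRAY_SIZE(mac_common_vals));
+ *
+ * Oversized tables are split across multiple CMD_RANDOM_WRITE messages by
+ * the recursive call above.
+ */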
+
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+ const u32 *data, int n)
+{
+ const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_regs_per_cmd, n);
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+ for (i = 0; i < cnt; i++)
+ skb_put_le32(skb, data[i]);
+
+ ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt7601u_burst_write_regs(dev, offset + cnt * 4,
+ data + cnt, n - cnt);
+}
+
+struct mt76_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76_fw {
+ struct mt76_fw_header hdr;
+ u8 ivb[MT_MCU_IVB_SIZE];
+ u8 ilm[];
+};
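+
+/* Layout implied by the loader below: hdr->ilm_len covers the 64-byte IVB
+ * plus the ILM proper; the IVB is sent separately via a vendor request
+ * while ILM and DLM follow back to back in @ilm.
+ */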
+
+static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
+ const struct mt7601u_dma_buf *dma_buf,
+ const void *data, u32 len, u32 dst_addr)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
+ __le32 reg;
+ u32 val;
+ int ret;
+
+ reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
+ MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+ MT76_SET(MT_TXD_INFO_LEN, len));
+ memcpy(buf.buf, &reg, sizeof(reg));
+ memcpy(buf.buf + sizeof(reg), data, len);
+ memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+ ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ if (ret)
+ return ret;
+ len = roundup(len, 4);
+ ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+ if (ret)
+ return ret;
+
+ buf.len = MT_DMA_HDR_LEN + len + 4;
+ ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+ &buf, GFP_KERNEL,
+ mt7601u_complete_urb, &cmpl);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+ dev_err(dev->dev, "Error: firmware upload timed out\n");
+ usb_kill_urb(buf.urb);
+ return -ETIMEDOUT;
+ }
+ if (mt7601u_urb_has_error(buf.urb)) {
+ dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
+ buf.urb->status);
+ return buf.urb->status;
+ }
+
+ val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ return 0;
+}
+
+static int
+mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
+ const void *data, int len, u32 dst_addr)
+{
+ int n, ret;
+
+ if (len == 0)
+ return 0;
+
+ n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+ ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+ return -ETIMEDOUT;
+
+ return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
+{
+ struct mt7601u_dma_buf dma_buf;
+ void *ivb;
+ u32 ilm_len, dlm_len;
+ int i, ret;
+
+ ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+ if (!ivb)
+ return -ENOMEM;
+ if (mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+ dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
+ ilm_len, sizeof(fw->ivb));
+ ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+ if (ret)
+ goto error;
+
+ dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+ dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
+ ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+ dlm_len, MT_MCU_DLM_OFFSET);
+ if (ret)
+ goto error;
+
+ ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ 0x12, 0, ivb, sizeof(fw->ivb));
+ if (ret < 0)
+ goto error;
+ ret = 0;
+
+ for (i = 100; i && !firmware_running(dev); i--)
+ msleep(10);
+ if (!i) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ dev_dbg(dev->dev, "Firmware running!\n");
+error:
+ kfree(ivb);
+ mt7601u_usb_free_buf(dev, &dma_buf);
+
+ return ret;
+}
+
+static int mt7601u_load_firmware(struct mt7601u_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76_fw_header *hdr;
+ int len, ret;
+ u32 val;
+
+ mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+
+ if (firmware_running(dev))
+ return 0;
+
+ ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto err_inv_fw;
+
+ hdr = (const struct mt76_fw_header *) fw->data;
+
+ if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+ goto err_inv_fw;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto err_inv_fw;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt7601u_wr(dev, 0x94c, 0);
+ mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0);
+
+ mt7601u_vendor_reset(dev);
+ msleep(5);
+
+ mt7601u_wr(dev, 0xa44, 0);
+ mt7601u_wr(dev, 0x230, 0x84210);
+ mt7601u_wr(dev, 0x400, 0x80c00);
+ mt7601u_wr(dev, 0x800, 1);
+
+ mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+ MT_PBF_CFG_TX1Q_EN |
+ MT_PBF_CFG_TX2Q_EN |
+ MT_PBF_CFG_TX3Q_EN));
+
+ mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+ val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR);
+ val &= ~MT_USB_DMA_CFG_TX_CLR;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+ /* FCE tx_fs_base_ptr */
+ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+ /* FCE pdma enable */
+ mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt7601u_wr(dev, MT_FCE_SKIP_FS, 3);
+
+ ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+
+ release_firmware(fw);
+
+ return ret;
+
+err_inv_fw:
+ dev_err(dev->dev, "Invalid firmware image\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ mutex_init(&dev->mcu.mutex);
+
+ ret = mt7601u_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state);
+
+ return 0;
+}
+
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1);
+ if (ret)
+ return ret;
+
+ init_completion(&dev->mcu.resp_cmpl);
+ if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+ mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+ return -ENOMEM;
+ }
+
+ ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt7601u_complete_urb, &dev->mcu.resp_cmpl);
+ if (ret) {
+ mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+ return ret;
+ }
+
+ return 0;
+}
+
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev)
+{
+ usb_kill_urb(dev->mcu.resp.urb);
+ mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.h b/drivers/net/wireless/mediatek/mt7601u/mcu.h
new file mode 100644
index 000000000000..4a66d1092a18
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_MCU_H
+#define __MT7601U_MCU_H
+
+struct mt7601u_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+
+#define MT_MCU_IVB_SIZE 0x40
+#define MT_MCU_DLM_OFFSET 0x80000
+
+#define MT_MCU_MEMMAP_WLAN 0x00410000
+#define MT_MCU_MEMMAP_BBP 0x40000000
+#define MT_MCU_MEMMAP_RF 0x80000000
+
+#define INBAND_PACKET_MAX_LEN 192
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+ MCU_CAL_R = 1,
+ MCU_CAL_DCOC,
+ MCU_CAL_LC,
+ MCU_CAL_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_BW,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQ,
+ MCU_CAL_TXDCOC,
+};
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev);
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev);
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev);
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val);
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
new file mode 100644
index 000000000000..9102be6b95cb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MT7601U_H
+#define MT7601U_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "regs.h"
+#include "util.h"
+
+#define MT_CALIBRATE_INTERVAL (4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
+
+#define MT_BBP_REG_VERSION 0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT 28 /* * 1024B */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
+#define MT_RX_ORDER 3
+#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
+
+struct mt7601u_dma_buf {
+ struct urb *urb;
+ void *buf;
+ dma_addr_t dma;
+ size_t len;
+};
+
+struct mt7601u_mcu {
+ struct mutex mutex;
+
+ u8 msg_seq;
+
+ struct mt7601u_dma_buf resp;
+ struct completion resp_cmpl;
+};
+
+struct mt7601u_freq_cal {
+ struct delayed_work work;
+ u8 freq;
+ bool enabled;
+ bool adjusting;
+};
+
+struct mac_stats {
+ u64 rx_stat[6];
+ u64 tx_stat[6];
+ u64 aggr_stat[2];
+ u64 aggr_n[32];
+ u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES 16
+struct mt7601u_rx_queue {
+ struct mt7601u_dev *dev;
+
+ struct mt7601u_dma_buf_rx {
+ struct urb *urb;
+ struct page *p;
+ } e[N_RX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int pending;
+};
+
+#define N_TX_ENTRIES 64
+
+struct mt7601u_tx_queue {
+ struct mt7601u_dev *dev;
+
+ struct mt7601u_dma_buf_tx {
+ struct urb *urb;
+ struct sk_buff *skb;
+ } e[N_TX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int used;
+ unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ * 0: mcast wcid
+ * 1: bssid wcid
+ * 1...: STAs
+ * ...7e: group wcids
+ * 7f: reserved
+ */
+#define N_WCIDS 128
+#define GROUP_WCID(idx)	(N_WCIDS - 2 - (idx))
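+/* e.g. GROUP_WCID(0) == 0x7e; group wcids count down from there, growing
+ * towards the STA range described above.
+ */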
+
+struct mt7601u_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE 39
+#define MT_FREQ_OFFSET_INVALID -128
+
+enum mt_temp_mode {
+ MT_TEMP_MODE_NORMAL,
+ MT_TEMP_MODE_HIGH,
+ MT_TEMP_MODE_LOW,
+};
+
+enum mt_bw {
+ MT_BW_20,
+ MT_BW_40,
+};
+
+enum {
+ MT7601U_STATE_INITIALIZED,
+ MT7601U_STATE_REMOVED,
+ MT7601U_STATE_WLAN_RUNNING,
+ MT7601U_STATE_MCU_RUNNING,
+ MT7601U_STATE_SCANNING,
+ MT7601U_STATE_READING_STATS,
+ MT7601U_STATE_MORE_STATS,
+};
+
+/**
+ * struct mt7601u_dev - adapter structure
+ * @lock: protects @wcid->tx_rate.
+ * @tx_lock: protects @tx_q and changes of MT7601U_STATE_*_STATS
+ *	     flags in @state.
+ * @rx_lock: protects @rx_q.
+ * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex: ensures exclusive access from mac80211 callbacks.
+ * @vendor_req_mutex: ensures atomicity of vendor requests.
+ * @reg_atomic_mutex: ensures atomicity of indirect register accesses
+ * (accesses to RF and BBP).
+ * @hw_atomic_mutex: ensures exclusive access to HW during critical
+ * operations (power management, channel switch).
+ */
+struct mt7601u_dev {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+
+ unsigned long state;
+
+ struct mutex mutex;
+
+ unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG];
+
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_supported_band *sband_2g;
+
+ struct mt7601u_mcu mcu;
+
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ struct workqueue_struct *stat_wq;
+ struct delayed_work stat_work;
+
+ struct mt76_wcid *mon_wcid;
+ struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+ spinlock_t lock;
+
+ const u16 *beacon_offsets;
+
+ u8 macaddr[ETH_ALEN];
+ struct mt7601u_eeprom_params *ee;
+
+ struct mutex vendor_req_mutex;
+ struct mutex reg_atomic_mutex;
+ struct mutex hw_atomic_mutex;
+
+ u32 rxfilter;
+ u32 debugfs_reg;
+
+ u8 out_eps[8];
+ u8 in_eps[8];
+ u16 out_max_packet;
+ u16 in_max_packet;
+
+ /* TX */
+ spinlock_t tx_lock;
+ struct mt7601u_tx_queue *tx_q;
+
+ atomic_t avg_ampdu_len;
+
+ /* RX */
+ spinlock_t rx_lock;
+ struct tasklet_struct rx_tasklet;
+ struct mt7601u_rx_queue rx_q;
+
+ /* Connection monitoring things */
+ spinlock_t con_mon_lock;
+ u8 ap_bssid[ETH_ALEN];
+
+ s8 bcn_freq_off;
+ u8 bcn_phy_mode;
+
+ int avg_rssi; /* starts at 0 and converges */
+
+ u8 agc_save;
+
+ struct mt7601u_freq_cal freq_cal;
+
+ bool tssi_read_trig;
+
+ s8 tssi_init;
+ s8 tssi_init_hvga;
+ s16 tssi_init_hvga_offset_db;
+
+ int prev_pwr_diff;
+
+ enum mt_temp_mode temp_mode;
+ int curr_temp;
+ int dpd_temp;
+ s8 raw_temp;
+ bool pll_lock_protect;
+
+ u8 bw;
+ bool chan_ext_below;
+
+ /* PA mode */
+ u32 rf_pa_mode[2];
+
+ struct mac_stats stats;
+};
+
+struct mt7601u_tssi_params {
+ char tssi0;
+ int trgt_power;
+};
+
+struct mt76_wcid {
+ u8 idx;
+ u8 hw_key_idx;
+
+ u16 tx_rate;
+ bool tx_rate_set;
+ u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76_sta {
+ struct mt76_wcid wcid;
+ u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+struct mt7601u_rxwi;
+
+extern const struct ieee80211_ops mt7601u_ops;
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev);
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset);
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val);
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+ const void *data, int len);
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev);
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val))
+
+static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
+{
+ return mt7601u_rr(dev, offset);
+}
+
+static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ return mt7601u_wr(dev, offset, val);
+}
+
+static inline u32
+mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ return mt7601u_rmw(dev, offset, mask, val);
+}
+
+static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ return mt76_rmw(dev, offset, 0, val);
+}
+
+static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ return mt76_rmw(dev, offset, val, 0);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len);
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+ const u32 *data, int n);
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt7601u_dev *mt7601u_alloc_device(struct device *dev);
+int mt7601u_init_hardware(struct mt7601u_dev *dev);
+int mt7601u_register_device(struct mt7601u_dev *dev);
+void mt7601u_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_mac_start(struct mt7601u_dev *dev);
+void mt7601u_mac_stop(struct mt7601u_dev *dev);
+
+/* PHY */
+int mt7601u_phy_init(struct mt7601u_dev *dev);
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev);
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path);
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path);
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw);
+void mt7601u_agc_save(struct mt7601u_dev *dev);
+void mt7601u_agc_restore(struct mt7601u_dev *dev);
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev);
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+ struct mt7601u_rxwi *rxwi, u16 rate);
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+ struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt7601u_mac_work(struct work_struct *work);
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb);
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval);
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
+
+/* TX */
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
+void mt7601u_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76_remove_hdr_pad(struct sk_buff *skb);
+int mt76_insert_hdr_pad(struct sk_buff *skb);
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below);
+
+static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+ return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below);
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev);
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
new file mode 100644
index 000000000000..1908af6add87
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -0,0 +1,1251 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev);
+
+static int
+mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value)
+{
+ int ret = 0;
+
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+ WARN_ON(offset > 63))
+ return -EINVAL;
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_DATA, value) |
+ MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
+ MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
+ trace_rf_write(dev, bank, offset, value);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->dev, "Error: RF write %02hhx:%02hhx failed:%d!!\n",
+ bank, offset, ret);
+
+ return ret;
+}
+
+static int
+mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset)
+{
+ int ret = -ETIMEDOUT;
+ u32 val;
+
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+ WARN_ON(offset > 63))
+ return -EINVAL;
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0xff;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
+ MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_KICK);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ val = mt7601u_rr(dev, MT_RF_CSR_CFG);
+ if (MT76_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
+ MT76_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ ret = MT76_GET(MT_RF_CSR_CFG_DATA, val);
+ trace_rf_read(dev, bank, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->dev, "Error: RF read %02hhx:%02hhx failed:%d!!\n",
+ bank, offset, ret);
+
+ return ret;
+}
+
+static int
+mt7601u_rf_rmw(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt7601u_rf_rr(dev, bank, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ ret = mt7601u_rf_wr(dev, bank, offset, val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int
+mt7601u_rf_set(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 val)
+{
+ return mt7601u_rf_rmw(dev, bank, offset, 0, val);
+}
+
+static int
+mt7601u_rf_clear(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask)
+{
+ return mt7601u_rf_rmw(dev, bank, offset, mask, 0);
+}
+
+static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val)
+{
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+ test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) {
+ dev_err(dev->dev, "Error: BBP write %02hhx failed!!\n", offset);
+ goto out;
+ }
+
+ mt7601u_wr(dev, MT_BBP_CSR_CFG,
+ MT76_SET(MT_BBP_CSR_CFG_VAL, val) |
+ MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY);
+ trace_bbp_write(dev, offset, val);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+}
+
+static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
+{
+ u32 val;
+ int ret = -ETIMEDOUT;
+
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)))
+ return -EINVAL;
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0xff;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+ goto out;
+
+ mt7601u_wr(dev, MT_BBP_CSR_CFG,
+ MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY |
+ MT_BBP_CSR_CFG_READ);
+
+ if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+ goto out;
+
+ val = mt7601u_rr(dev, MT_BBP_CSR_CFG);
+ if (MT76_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
+ ret = MT76_GET(MT_BBP_CSR_CFG_VAL, val);
+ trace_bbp_read(dev, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->dev, "Error: BBP read %02hhx failed:%d!!\n",
+ offset, ret);
+
+ return ret;
+}
+
+static int mt7601u_bbp_rmw(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt7601u_bbp_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ mt7601u_bbp_wr(dev, offset, val);
+
+ return val;
+}
+
+static u8 mt7601u_bbp_rmc(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt7601u_bbp_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ if (ret != val)
+ mt7601u_bbp_wr(dev, offset, val);
+
+ return val;
+}
+
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
+{
+ int i = 20;
+ u8 val;
+
+ do {
+ val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
+		if (val && val != 0xff)
+ break;
+ } while (--i);
+
+ if (!i) {
+ dev_err(dev->dev, "Error: BBP is not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+ return mt7601u_bbp_rmc(dev, 3, 0x20, below ? 0x20 : 0);
+}
+
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+ struct mt7601u_rxwi *rxwi, u16 rate)
+{
+ static const s8 lna[2][2][3] = {
+ /* main LNA */ {
+ /* bw20 */ { -2, 15, 33 },
+ /* bw40 */ { 0, 16, 34 }
+ },
+ /* aux LNA */ {
+ /* bw20 */ { -2, 15, 33 },
+ /* bw40 */ { -2, 16, 34 }
+ }
+ };
+ int bw = MT76_GET(MT_RXWI_RATE_BW, rate);
+ int aux_lna = MT76_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
+ int lna_id = MT76_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
+ int val;
+
+ if (lna_id) /* LNA id can be 0, 2, 3. */
+ lna_id--;
+
+ val = 8;
+ val -= lna[aux_lna][bw][lna_id];
+ val -= MT76_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
+ val -= dev->ee->lna_gain;
+ val -= dev->ee->rssi_offset[0];
+
+ return val;
+}
+
+static void mt7601u_vco_cal(struct mt7601u_dev *dev)
+{
+ mt7601u_rf_wr(dev, 0, 4, 0x0a);
+ mt7601u_rf_wr(dev, 0, 5, 0x20);
+ mt7601u_rf_set(dev, 0, 4, BIT(7));
+ msleep(2);
+}
+
+static int mt7601u_set_bw_filter(struct mt7601u_dev *dev, bool cal)
+{
+ u32 filter = 0;
+ int ret;
+
+ if (!cal)
+ filter |= 0x10000;
+ if (dev->bw != MT_BW_20)
+ filter |= 0x00100;
+
+ /* TX */
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter | 1);
+ if (ret)
+ return ret;
+ /* RX */
+ return mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter);
+}
+
+static int mt7601u_load_bbp_temp_table_bw(struct mt7601u_dev *dev)
+{
+ const struct reg_table *t;
+
+ if (WARN_ON(dev->temp_mode > MT_TEMP_MODE_LOW))
+ return -EINVAL;
+
+ t = &bbp_mode_table[dev->temp_mode][dev->bw];
+
+ return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n);
+}
+
+static int mt7601u_bbp_temp(struct mt7601u_dev *dev, int mode, const char *name)
+{
+ const struct reg_table *t;
+ int ret;
+
+ if (dev->temp_mode == mode)
+ return 0;
+
+ dev->temp_mode = mode;
+ trace_temp_mode(dev, mode);
+
+ t = bbp_mode_table[dev->temp_mode];
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ t[2].regs, t[2].n);
+ if (ret)
+ return ret;
+
+ return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ t[dev->bw].regs, t[dev->bw].n);
+}
+
+static void mt7601u_apply_ch14_fixup(struct mt7601u_dev *dev, int hw_chan)
+{
+ struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+ if (hw_chan != 14 || dev->bw != MT_BW_20) {
+ mt7601u_bbp_rmw(dev, 4, 0x20, 0);
+ mt7601u_bbp_wr(dev, 178, 0xff);
+
+ t->cck[0].bw20 = dev->ee->real_cck_bw20[0];
+ t->cck[1].bw20 = dev->ee->real_cck_bw20[1];
+ } else { /* Apply CH14 OBW fixup */
+ mt7601u_bbp_wr(dev, 4, 0x60);
+ mt7601u_bbp_wr(dev, 178, 0);
+
+ /* Note: vendor code is buggy here for negative values */
+ t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2;
+ t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2;
+ }
+}
+
+static int __mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+#define FREQ_PLAN_REGS 4
+ static const u8 freq_plan[14][FREQ_PLAN_REGS] = {
+ { 0x99, 0x99, 0x09, 0x50 },
+ { 0x46, 0x44, 0x0a, 0x50 },
+ { 0xec, 0xee, 0x0a, 0x50 },
+ { 0x99, 0x99, 0x0b, 0x50 },
+ { 0x46, 0x44, 0x08, 0x51 },
+ { 0xec, 0xee, 0x08, 0x51 },
+ { 0x99, 0x99, 0x09, 0x51 },
+ { 0x46, 0x44, 0x0a, 0x51 },
+ { 0xec, 0xee, 0x0a, 0x51 },
+ { 0x99, 0x99, 0x0b, 0x51 },
+ { 0x46, 0x44, 0x08, 0x52 },
+ { 0xec, 0xee, 0x08, 0x52 },
+ { 0x99, 0x99, 0x09, 0x52 },
+ { 0x33, 0x33, 0x0b, 0x52 },
+ };
+ struct mt76_reg_pair channel_freq_plan[FREQ_PLAN_REGS] = {
+ { 17, 0 }, { 18, 0 }, { 19, 0 }, { 20, 0 },
+ };
+ struct mt76_reg_pair bbp_settings[3] = {
+ { 62, 0x37 - dev->ee->lna_gain },
+ { 63, 0x37 - dev->ee->lna_gain },
+ { 64, 0x37 - dev->ee->lna_gain },
+ };
+
+ struct ieee80211_channel *chan = chandef->chan;
+ enum nl80211_channel_type chan_type =
+ cfg80211_get_chandef_type(chandef);
+ struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+ int chan_idx;
+ bool chan_ext_below;
+ u8 bw;
+ int i, ret;
+
+ bw = MT_BW_20;
+ chan_ext_below = (chan_type == NL80211_CHAN_HT40MINUS);
+ chan_idx = chan->hw_value - 1;
+
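+	/* For 40 MHz the radio is tuned to the centre of the channel pair;
+	 * the centre sits two channel numbers (10 MHz) away from the
+	 * primary, hence the +/-2 adjustment below.
+	 */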
+ if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ bw = MT_BW_40;
+
+ if (chan_idx > 1 && chan_type == NL80211_CHAN_HT40MINUS)
+ chan_idx -= 2;
+ else if (chan_idx < 12 && chan_type == NL80211_CHAN_HT40PLUS)
+ chan_idx += 2;
+ else
+ dev_err(dev->dev, "Error: invalid 40MHz channel!!\n");
+ }
+
+ if (bw != dev->bw || chan_ext_below != dev->chan_ext_below) {
+ dev_dbg(dev->dev, "Info: switching HT mode bw:%d below:%d\n",
+ bw, chan_ext_below);
+
+ mt7601u_bbp_set_bw(dev, bw);
+
+ mt7601u_bbp_set_ctrlch(dev, chan_ext_below);
+ mt7601u_mac_set_ctrlch(dev, chan_ext_below);
+ dev->chan_ext_below = chan_ext_below;
+ }
+
+ for (i = 0; i < FREQ_PLAN_REGS; i++)
+ channel_freq_plan[i].value = freq_plan[chan_idx][i];
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_RF,
+ channel_freq_plan, FREQ_PLAN_REGS);
+ if (ret)
+ return ret;
+
+ mt7601u_rmw(dev, MT_TX_ALC_CFG_0, 0x3f3f,
+ dev->ee->chan_pwr[chan_idx] & 0x3f);
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ bbp_settings, ARRAY_SIZE(bbp_settings));
+ if (ret)
+ return ret;
+
+ mt7601u_vco_cal(dev);
+ mt7601u_bbp_set_bw(dev, bw);
+ ret = mt7601u_set_bw_filter(dev, false);
+ if (ret)
+ return ret;
+
+ mt7601u_apply_ch14_fixup(dev, chan->hw_value);
+ mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 |
+ int_to_s6(t->ofdm[0].bw20) << 16 |
+ int_to_s6(t->cck[1].bw20) << 8 |
+ int_to_s6(t->cck[0].bw20));
+
+ if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+ mt7601u_agc_reset(dev);
+
+ dev->chandef = *chandef;
+
+ return 0;
+}
+
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->freq_cal.work);
+
+ mutex_lock(&dev->hw_atomic_mutex);
+ ret = __mt7601u_phy_set_channel(dev, chandef);
+ mutex_unlock(&dev->hw_atomic_mutex);
+ if (ret)
+ return ret;
+
+ if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+ return 0;
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ if (dev->freq_cal.enabled)
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+ MT_FREQ_CAL_INIT_DELAY);
+ return 0;
+}
+
+#define BBP_R47_FLAG GENMASK(2, 0)
+#define BBP_R47_F_TSSI 0
+#define BBP_R47_F_PKT_T 1
+#define BBP_R47_F_TX_RATE 2
+#define BBP_R47_F_TEMP 4
+/**
+ * mt7601u_bbp_r47_get - read value through BBP R47/R49 pair
+ * @dev: pointer to adapter structure
+ * @reg: value of BBP R47 before the operation
+ * @flag: one of the BBP_R47_F_* flags
+ *
+ * Convenience helper for reading values through BBP R47/R49 pair.
+ * Takes old value of BBP R47 as @reg, because callers usually have it
+ * cached already.
+ *
+ * Return: value of BBP R49.
+ */
+static u8 mt7601u_bbp_r47_get(struct mt7601u_dev *dev, u8 reg, u8 flag)
+{
+ flag |= reg & ~BBP_R47_FLAG;
+ mt7601u_bbp_wr(dev, 47, flag);
+ usleep_range(500, 700);
+ return mt7601u_bbp_rr(dev, 49);
+}
+
+static s8 mt7601u_read_bootup_temp(struct mt7601u_dev *dev)
+{
+ u8 bbp_val, temp;
+ u32 rf_bp, rf_set;
+ int i;
+
+ rf_set = mt7601u_rr(dev, MT_RF_SETTING_0);
+ rf_bp = mt7601u_rr(dev, MT_RF_BYPASS_0);
+
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+ mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000010);
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0x00000010);
+
+ bbp_val = mt7601u_bbp_rmw(dev, 47, 0, 0x10);
+
+ mt7601u_bbp_wr(dev, 22, 0x40);
+
+ for (i = 100; i && (bbp_val & 0x10); i--)
+ bbp_val = mt7601u_bbp_rr(dev, 47);
+
+ temp = mt7601u_bbp_r47_get(dev, bbp_val, BBP_R47_F_TEMP);
+
+ mt7601u_bbp_wr(dev, 22, 0);
+
+ bbp_val = mt7601u_bbp_rr(dev, 21);
+ bbp_val |= 0x02;
+ mt7601u_bbp_wr(dev, 21, bbp_val);
+ bbp_val &= ~0x02;
+ mt7601u_bbp_wr(dev, 21, bbp_val);
+
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+ mt7601u_wr(dev, MT_RF_SETTING_0, rf_set);
+ mt7601u_wr(dev, MT_RF_BYPASS_0, rf_bp);
+
+ trace_read_temp(dev, temp);
+ return temp;
+}
+
+static s8 mt7601u_read_temp(struct mt7601u_dev *dev)
+{
+ int i;
+ u8 val;
+ s8 temp;
+
+ val = mt7601u_bbp_rmw(dev, 47, 0x7f, 0x10);
+
+ /* Note: this rarely succeeds, temp can change even if it fails. */
+ for (i = 100; i && (val & 0x10); i--)
+ val = mt7601u_bbp_rr(dev, 47);
+
+ temp = mt7601u_bbp_r47_get(dev, val, BBP_R47_F_TEMP);
+
+ trace_read_temp(dev, temp);
+ return temp;
+}
+
+static void mt7601u_rxdc_cal(struct mt7601u_dev *dev)
+{
+ static const struct mt76_reg_pair intro[] = {
+ { 158, 0x8d }, { 159, 0xfc },
+ { 158, 0x8c }, { 159, 0x4c },
+ }, outro[] = {
+ { 158, 0x8d }, { 159, 0xe0 },
+ };
+ u32 mac_ctrl;
+ int i, ret;
+
+ mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ intro, ARRAY_SIZE(intro));
+ if (ret)
+ dev_err(dev->dev, "%s intro failed:%d\n", __func__, ret);
+
+ for (i = 20; i; i--) {
+ usleep_range(300, 500);
+
+ mt7601u_bbp_wr(dev, 158, 0x8c);
+ if (mt7601u_bbp_rr(dev, 159) == 0x0c)
+ break;
+ }
+ if (!i)
+ dev_err(dev->dev, "%s timed out\n", __func__);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ outro, ARRAY_SIZE(outro));
+ if (ret)
+ dev_err(dev->dev, "%s outro failed:%d\n", __func__, ret);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+}
+
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev)
+{
+ mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp);
+
+ mt7601u_rxdc_cal(dev);
+}
+
+/* Note: function copied from vendor driver */
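+/* As far as I can tell this approximates 20 * log10(linear) in 1/32 dB
+ * units: the fls()/shift step extracts log2() in Q15 fixed point, and the
+ * shift-add chain multiplies by ~6.02 before the final >> 10.
+ * E.g. lin2dBd(0x8000) returns 2894, i.e. ~90.4 dB (exact value 90.3 dB).
+ */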
+static s16 lin2dBd(u16 linear)
+{
+ short exp = 0;
+	unsigned int mantissa;
+	int app, dBd;
+
+	if (WARN_ON(!linear))
+		return -10000;
+
+	mantissa = linear;
+
+	exp = fls(mantissa) - 16;
+	if (exp > 0)
+		mantissa >>= exp;
+	else
+		mantissa <<= abs(exp);
+
+	if (mantissa <= 0xb800)
+		app = (mantissa + (mantissa >> 3) + (mantissa >> 4) - 0x9600);
+	else
+		app = (mantissa - (mantissa >> 3) - (mantissa >> 6) - 0x5a00);
+ if (app < 0)
+ app = 0;
+
+ dBd = ((15 + exp) << 15) + app;
+ dBd = (dBd << 2) + (dBd << 1) + (dBd >> 6) + (dBd >> 7);
+ dBd = (dBd >> 10);
+
+ return dBd;
+}
+
+static void
+mt7601u_set_initial_tssi(struct mt7601u_dev *dev, s16 tssi_db, s16 tssi_hvga_db)
+{
+ struct tssi_data *d = &dev->ee->tssi_data;
+ int init_offset;
+
+ init_offset = -((tssi_db * d->slope + d->offset[1]) / 4096) + 10;
+
+ mt76_rmw(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ int_to_s6(init_offset) & MT_TX_ALC_CFG_1_TEMP_COMP);
+}
+
+static void mt7601u_tssi_dc_gain_cal(struct mt7601u_dev *dev)
+{
+ u8 rf_vga, rf_mixer, bbp_r47;
+ int i, j;
+ s8 res[4];
+ s16 tssi_init_db, tssi_init_hvga_db;
+
+ mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000030);
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0x000c0030);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ mt7601u_bbp_wr(dev, 58, 0);
+ mt7601u_bbp_wr(dev, 241, 0x2);
+ mt7601u_bbp_wr(dev, 23, 0x8);
+ bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+ /* Set VGA gain */
+ rf_vga = mt7601u_rf_rr(dev, 5, 3);
+ mt7601u_rf_wr(dev, 5, 3, 8);
+
+ /* Mixer disable */
+ rf_mixer = mt7601u_rf_rr(dev, 4, 39);
+ mt7601u_rf_wr(dev, 4, 39, 0);
+
+ for (i = 0; i < 4; i++) {
+ mt7601u_rf_wr(dev, 4, 39, (i & 1) ? rf_mixer : 0);
+
+ mt7601u_bbp_wr(dev, 23, (i < 2) ? 0x08 : 0x02);
+ mt7601u_rf_wr(dev, 5, 3, (i < 2) ? 0x08 : 0x11);
+
+ /* BBP TSSI initial and soft reset */
+ mt7601u_bbp_wr(dev, 22, 0);
+ mt7601u_bbp_wr(dev, 244, 0);
+
+ mt7601u_bbp_wr(dev, 21, 1);
+ udelay(1);
+ mt7601u_bbp_wr(dev, 21, 0);
+
+ /* TSSI measurement */
+ mt7601u_bbp_wr(dev, 47, 0x50);
+ mt7601u_bbp_wr(dev, (i & 1) ? 244 : 22, (i & 1) ? 0x31 : 0x40);
+
+ for (j = 20; j; j--)
+ if (!(mt7601u_bbp_rr(dev, 47) & 0x10))
+ break;
+ if (!j)
+ dev_err(dev->dev, "%s timed out\n", __func__);
+
+ /* TSSI read */
+ mt7601u_bbp_wr(dev, 47, 0x40);
+ res[i] = mt7601u_bbp_rr(dev, 49);
+ }
+
+ tssi_init_db = lin2dBd((short)res[1] - res[0]);
+ tssi_init_hvga_db = lin2dBd(((short)res[3] - res[2]) * 4);
+ dev->tssi_init = res[0];
+ dev->tssi_init_hvga = res[2];
+ dev->tssi_init_hvga_offset_db = tssi_init_hvga_db - tssi_init_db;
+
+ dev_dbg(dev->dev,
+ "TSSI_init:%hhx db:%hx hvga:%hhx hvga_db:%hx off_db:%hx\n",
+ dev->tssi_init, tssi_init_db, dev->tssi_init_hvga,
+ tssi_init_hvga_db, dev->tssi_init_hvga_offset_db);
+
+ mt7601u_bbp_wr(dev, 22, 0);
+ mt7601u_bbp_wr(dev, 244, 0);
+
+ mt7601u_bbp_wr(dev, 21, 1);
+ udelay(1);
+ mt7601u_bbp_wr(dev, 21, 0);
+
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+ mt7601u_wr(dev, MT_RF_SETTING_0, 0);
+
+ mt7601u_rf_wr(dev, 5, 3, rf_vga);
+ mt7601u_rf_wr(dev, 4, 39, rf_mixer);
+ mt7601u_bbp_wr(dev, 47, bbp_r47);
+
+ mt7601u_set_initial_tssi(dev, tssi_init_db, tssi_init_hvga_db);
+}
+
+static int mt7601u_temp_comp(struct mt7601u_dev *dev, bool on)
+{
+ int ret, temp, hi_temp = 400, lo_temp = -200;
+
+ temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE;
+ dev->curr_temp = temp;
+
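+	/* With MT_EE_TEMPERATURE_SLOPE == 39 every raw sensor step is worth
+	 * 39 of these units, so the +/-450 DPD threshold below corresponds
+	 * to a drift of roughly 11-12 raw steps (my reading of the code;
+	 * the vendor driver does not document the units).
+	 */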
+ /* DPD Calibration */
+ if (temp - dev->dpd_temp > 450 || temp - dev->dpd_temp < -450) {
+ dev->dpd_temp = temp;
+
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+ if (ret)
+ return ret;
+
+ mt7601u_vco_cal(dev);
+
+ dev_dbg(dev->dev, "Recalibrate DPD\n");
+ }
+
+ /* PLL Lock Protect */
+ if (temp < -50 && !dev->pll_lock_protect) { /* < 20C */
+ dev->pll_lock_protect = true;
+
+ mt7601u_rf_wr(dev, 4, 4, 6);
+ mt7601u_rf_clear(dev, 4, 10, 0x30);
+
+ dev_dbg(dev->dev, "PLL lock protect on - too cold\n");
+ } else if (temp > 50 && dev->pll_lock_protect) { /* > 30C */
+ dev->pll_lock_protect = false;
+
+ mt7601u_rf_wr(dev, 4, 4, 0);
+ mt7601u_rf_rmw(dev, 4, 10, 0x30, 0x10);
+
+ dev_dbg(dev->dev, "PLL lock protect off\n");
+ }
+
+ if (on) {
+ hi_temp -= 50;
+ lo_temp -= 50;
+ }
+
+ /* BBP CR for H, L, N temperature */
+ if (temp > hi_temp)
+ return mt7601u_bbp_temp(dev, MT_TEMP_MODE_HIGH, "high");
+ else if (temp > lo_temp)
+ return mt7601u_bbp_temp(dev, MT_TEMP_MODE_NORMAL, "normal");
+ else
+ return mt7601u_bbp_temp(dev, MT_TEMP_MODE_LOW, "low");
+}
+
+/* Note: this is used only with TSSI, we can just use trgt_pwr from eeprom. */
+static int mt7601u_current_tx_power(struct mt7601u_dev *dev)
+{
+ return dev->ee->chan_pwr[dev->chandef.chan->hw_value - 1];
+}
+
+static bool mt7601u_use_hvga(struct mt7601u_dev *dev)
+{
+	return mt7601u_current_tx_power(dev) <= 20;
+}
+
+static s16
+mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate)
+{
+ static const s16 decode_tb[] = { 0, 8847, -5734, -5734 };
+ u32 reg;
+
+ switch (phy_mode) {
+ case MT_PHY_TYPE_OFDM:
+ tx_rate += 4;
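+		/* fall through - OFDM shares the PA mode register with CCK */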
+ case MT_PHY_TYPE_CCK:
+ reg = dev->rf_pa_mode[0];
+ break;
+ default:
+ reg = dev->rf_pa_mode[1];
+ break;
+ }
+
+ return decode_tb[(reg >> (tx_rate * 2)) & 0x3];
+}
+
+static struct mt7601u_tssi_params
+mt7601u_tssi_params_get(struct mt7601u_dev *dev)
+{
+ static const u8 ofdm_pkt2rate[8] = { 6, 4, 2, 0, 7, 5, 3, 1 };
+ static const int static_power[4] = { 0, -49152, -98304, 49152 };
+ struct mt7601u_tssi_params p;
+ u8 bbp_r47, pkt_type, tx_rate;
+ struct power_per_rate *rate_table;
+
+ bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+ p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI);
+ dev->raw_temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP);
+ pkt_type = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_PKT_T);
+
+ p.trgt_power = mt7601u_current_tx_power(dev);
+
+ switch (pkt_type & 0x03) {
+ case MT_PHY_TYPE_CCK:
+ tx_rate = (pkt_type >> 4) & 0x03;
+ rate_table = dev->ee->power_rate_table.cck;
+ break;
+
+ case MT_PHY_TYPE_OFDM:
+ tx_rate = ofdm_pkt2rate[(pkt_type >> 4) & 0x07];
+ rate_table = dev->ee->power_rate_table.ofdm;
+ break;
+
+ default:
+ tx_rate = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TX_RATE);
+ tx_rate &= 0x7f;
+ rate_table = dev->ee->power_rate_table.ht;
+ break;
+ }
+
+ if (dev->bw == MT_BW_20)
+ p.trgt_power += rate_table[tx_rate / 2].bw20;
+ else
+ p.trgt_power += rate_table[tx_rate / 2].bw40;
+
+ p.trgt_power <<= 12;
+
+ dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power);
+
+ p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03,
+ tx_rate);
+
+ /* Channel 14, cck, bw20 */
+ if ((pkt_type & 0x03) == MT_PHY_TYPE_CCK) {
+ if (mt7601u_bbp_rr(dev, 4) & 0x20)
+ p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830;
+ else
+ p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576;
+ }
+
+ p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03];
+
+ p.trgt_power += dev->ee->tssi_data.tx0_delta_offset;
+
+ dev_dbg(dev->dev,
+ "tssi:%02hhx t_power:%08x temp:%02hhx pkt_type:%02hhx\n",
+ p.tssi0, p.trgt_power, dev->raw_temp, pkt_type);
+
+ return p;
+}
+
+static bool mt7601u_tssi_read_ready(struct mt7601u_dev *dev)
+{
+ return !(mt7601u_bbp_rr(dev, 47) & 0x10);
+}
+
+static int mt7601u_tssi_cal(struct mt7601u_dev *dev)
+{
+ struct mt7601u_tssi_params params;
+ int curr_pwr, diff_pwr;
+ char tssi_offset;
+ s8 tssi_init;
+ s16 tssi_m_dc, tssi_db;
+ bool hvga;
+ u32 val;
+
+ if (!dev->ee->tssi_enabled)
+ return 0;
+
+ hvga = mt7601u_use_hvga(dev);
+ if (!dev->tssi_read_trig)
+ return mt7601u_mcu_tssi_read_kick(dev, hvga);
+
+ if (!mt7601u_tssi_read_ready(dev))
+ return 0;
+
+ params = mt7601u_tssi_params_get(dev);
+
+ tssi_init = (hvga ? dev->tssi_init_hvga : dev->tssi_init);
+ tssi_m_dc = params.tssi0 - tssi_init;
+ tssi_db = lin2dBd(tssi_m_dc);
+ dev_dbg(dev->dev, "tssi dc:%04hx db:%04hx hvga:%d\n",
+ tssi_m_dc, tssi_db, hvga);
+
+ if (dev->chandef.chan->hw_value < 5)
+ tssi_offset = dev->ee->tssi_data.offset[0];
+ else if (dev->chandef.chan->hw_value < 9)
+ tssi_offset = dev->ee->tssi_data.offset[1];
+ else
+ tssi_offset = dev->ee->tssi_data.offset[2];
+
+ if (hvga)
+ tssi_db -= dev->tssi_init_hvga_offset_db;
+
+ curr_pwr = tssi_db * dev->ee->tssi_data.slope + (tssi_offset << 9);
+ diff_pwr = params.trgt_power - curr_pwr;
+ dev_dbg(dev->dev, "Power curr:%08x diff:%08x\n", curr_pwr, diff_pwr);
+
+ if (params.tssi0 > 126 && diff_pwr > 0) {
+ dev_err(dev->dev, "Error: TSSI upper saturation\n");
+ diff_pwr = 0;
+ }
+ if (params.tssi0 - tssi_init < 1 && diff_pwr < 0) {
+ dev_err(dev->dev, "Error: TSSI lower saturation\n");
+ diff_pwr = 0;
+ }
+
+ if ((dev->prev_pwr_diff ^ diff_pwr) < 0 && abs(diff_pwr) < 4096 &&
+ (abs(diff_pwr) > abs(dev->prev_pwr_diff) ||
+ (diff_pwr > 0 && diff_pwr == -dev->prev_pwr_diff)))
+ diff_pwr = 0;
+ else
+ dev->prev_pwr_diff = diff_pwr;
+
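+	/* Round to the nearest ALC step, away from zero on ties: 4096 units
+	 * make one step, so e.g. a diff_pwr of 6000 becomes 1 and -6000
+	 * becomes -1.
+	 */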
+ diff_pwr += (diff_pwr > 0) ? 2048 : -2048;
+ diff_pwr /= 4096;
+
+ dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr);
+
+ val = mt7601u_rr(dev, MT_TX_ALC_CFG_1);
+ curr_pwr = s6_to_int(MT76_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
+ diff_pwr += curr_pwr;
+ val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr);
+ mt7601u_wr(dev, MT_TX_ALC_CFG_1, val);
+
+ return mt7601u_mcu_tssi_read_kick(dev, hvga);
+}
+
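+/* Worked example (assuming lna_gain is in dB, as elsewhere in this file):
+ * an LNA gain of 8 dB yields the 0x34 midpoint, and each extra dB of LNA
+ * gain raises the default AGC value by 2.
+ */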
+static u8 mt7601u_agc_default(struct mt7601u_dev *dev)
+{
+ return (dev->ee->lna_gain - 8) * 2 + 0x34;
+}
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev)
+{
+ u8 agc = mt7601u_agc_default(dev);
+
+ mt7601u_bbp_wr(dev, 66, agc);
+}
+
+void mt7601u_agc_save(struct mt7601u_dev *dev)
+{
+ dev->agc_save = mt7601u_bbp_rr(dev, 66);
+}
+
+void mt7601u_agc_restore(struct mt7601u_dev *dev)
+{
+ mt7601u_bbp_wr(dev, 66, dev->agc_save);
+}
+
+static void mt7601u_agc_tune(struct mt7601u_dev *dev)
+{
+ u8 val = mt7601u_agc_default(dev);
+
+ if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+ return;
+
+	/* Note: only in STA mode and not dozing; perhaps do this only if
+	 * there are enough RSSI updates since the last run?
+	 * RSSI updates come only on beacons and U2M frames, so it should work...
+ */
+ spin_lock_bh(&dev->con_mon_lock);
+ if (dev->avg_rssi <= -70)
+ val -= 0x20;
+ else if (dev->avg_rssi <= -60)
+ val -= 0x10;
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ if (val != mt7601u_bbp_rr(dev, 66))
+ mt7601u_bbp_wr(dev, 66, val);
+
+ /* TODO: also if lost a lot of beacons try resetting
+ * (see RTMPSetAGCInitValue() call in mlme.c).
+ */
+}
+
+static void mt7601u_phy_calibrate(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ cal_work.work);
+
+ mt7601u_agc_tune(dev);
+ mt7601u_tssi_cal(dev);
+	/* If TSSI calibration was run, it has already updated the temperature. */
+ if (!dev->ee->tssi_enabled)
+ dev->raw_temp = mt7601u_read_temp(dev);
+ mt7601u_temp_comp(dev, true); /* TODO: find right value for @on */
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+static unsigned long
+__mt7601u_phy_freq_cal(struct mt7601u_dev *dev, s8 last_offset, u8 phy_mode)
+{
+ u8 activate_threshold, deactivate_threshold;
+
+ trace_freq_cal_offset(dev, phy_mode, last_offset);
+
+ /* No beacons received - reschedule soon */
+ if (last_offset == MT_FREQ_OFFSET_INVALID)
+ return MT_FREQ_CAL_ADJ_INTERVAL;
+
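+	/* The thresholds below form a hysteresis: adjustment kicks in only
+	 * once the offset exceeds the activate threshold and keeps running
+	 * until it falls to the deactivate threshold.
+	 */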
+ switch (phy_mode) {
+ case MT_PHY_TYPE_CCK:
+ activate_threshold = 19;
+ deactivate_threshold = 5;
+ break;
+ case MT_PHY_TYPE_OFDM:
+ activate_threshold = 102;
+ deactivate_threshold = 32;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ activate_threshold = 82;
+ deactivate_threshold = 20;
+ break;
+ default:
+ WARN_ON(1);
+ return MT_FREQ_CAL_CHECK_INTERVAL;
+ }
+
+ if (abs(last_offset) >= activate_threshold)
+ dev->freq_cal.adjusting = true;
+ else if (abs(last_offset) <= deactivate_threshold)
+ dev->freq_cal.adjusting = false;
+
+ if (!dev->freq_cal.adjusting)
+ return MT_FREQ_CAL_CHECK_INTERVAL;
+
+ if (last_offset > deactivate_threshold) {
+ if (dev->freq_cal.freq > 0)
+ dev->freq_cal.freq--;
+ else
+ dev->freq_cal.adjusting = false;
+ } else if (last_offset < -deactivate_threshold) {
+ if (dev->freq_cal.freq < 0xbf)
+ dev->freq_cal.freq++;
+ else
+ dev->freq_cal.adjusting = false;
+ }
+
+ trace_freq_cal_adjust(dev, dev->freq_cal.freq);
+ mt7601u_rf_wr(dev, 0, 12, dev->freq_cal.freq);
+ mt7601u_vco_cal(dev);
+
+ return dev->freq_cal.adjusting ? MT_FREQ_CAL_ADJ_INTERVAL :
+ MT_FREQ_CAL_CHECK_INTERVAL;
+}
+
+static void mt7601u_phy_freq_cal(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ freq_cal.work.work);
+ s8 last_offset;
+ u8 phy_mode;
+ unsigned long delay;
+
+ spin_lock_bh(&dev->con_mon_lock);
+ last_offset = dev->bcn_freq_off;
+ phy_mode = dev->bcn_phy_mode;
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ delay = __mt7601u_phy_freq_cal(dev, last_offset, phy_mode);
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+}
+
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+ struct ieee80211_bss_conf *info)
+{
+ if (!info->assoc)
+ cancel_delayed_work_sync(&dev->freq_cal.work);
+
+ /* Start/stop collecting beacon data */
+ spin_lock_bh(&dev->con_mon_lock);
+ ether_addr_copy(dev->ap_bssid, info->bssid);
+ dev->avg_rssi = 0;
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ dev->freq_cal.freq = dev->ee->rf_freq_off;
+ dev->freq_cal.enabled = info->assoc;
+ dev->freq_cal.adjusting = false;
+
+ if (info->assoc)
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+ MT_FREQ_CAL_INIT_DELAY);
+}
+
+static int mt7601u_init_cal(struct mt7601u_dev *dev)
+{
+ u32 mac_ctrl;
+ int ret;
+
+ dev->raw_temp = mt7601u_read_bootup_temp(dev);
+ dev->curr_temp = (dev->raw_temp - dev->ee->ref_temp) *
+ MT_EE_TEMPERATURE_SLOPE;
+ dev->dpd_temp = dev->curr_temp;
+
+ mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0);
+ if (ret)
+ return ret;
+
+ ret = mt7601u_rf_rr(dev, 0, 4);
+ if (ret < 0)
+ return ret;
+ ret |= 0x80;
+ ret = mt7601u_rf_wr(dev, 0, 4, ret);
+ if (ret)
+ return ret;
+ msleep(2);
+
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0);
+ if (ret)
+ return ret;
+
+ mt7601u_rxdc_cal(dev);
+
+ ret = mt7601u_set_bw_filter(dev, true);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_LOFT, 0);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXIQ, 0);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_RXIQ, 0);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+ if (ret)
+ return ret;
+
+ mt7601u_rxdc_cal(dev);
+
+ mt7601u_tssi_dc_gain_cal(dev);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+
+ mt7601u_temp_comp(dev, true);
+
+ return 0;
+}
+
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw)
+{
+ u32 val, old;
+
+ if (bw == dev->bw) {
+ /* Vendor driver does the rmc even when no change is needed. */
+ mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+ return 0;
+ }
+ dev->bw = bw;
+
+ /* Stop MAC for the time of bw change */
+ old = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+ val = old & ~(MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, val);
+ mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 500000);
+
+ mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, old);
+
+ return mt7601u_load_bbp_temp_table_bw(dev);
+}
+
+/**
+ * mt7601u_set_rx_path - set rx path in BBP
+ * @dev: pointer to adapter structure
+ * @path: rx path to set, values are 0-based
+ */
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path)
+{
+ mt7601u_bbp_rmw(dev, 3, 0x18, path << 3);
+}
+
+/**
+ * mt7601u_set_tx_dac - set which tx DAC to use
+ * @dev: pointer to adapter structure
+ * @dac: DAC index, values are 0-based
+ */
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac)
+{
+ mt7601u_bbp_rmc(dev, 1, 0x18, dac << 3);
+}
+
+int mt7601u_phy_init(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ dev->rf_pa_mode[0] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG0);
+ dev->rf_pa_mode[1] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG1);
+
+ ret = mt7601u_rf_wr(dev, 0, 12, dev->ee->rf_freq_off);
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, 0, rf_central,
+ ARRAY_SIZE(rf_central));
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, 0, rf_channel,
+ ARRAY_SIZE(rf_channel));
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, 0, rf_vga, ARRAY_SIZE(rf_vga));
+ if (ret)
+ return ret;
+
+ ret = mt7601u_init_cal(dev);
+ if (ret)
+ return ret;
+
+ dev->prev_pwr_diff = 100;
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt7601u_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h
new file mode 100644
index 000000000000..afd8978e83fa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/regs.h
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#ifndef GENMASK
+#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#endif
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)		BIT((_n) + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)		(((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_USB_DMA_CFG 0x238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_PHY_CLR BIT(16)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 27)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+#define MT_RING_SIZE 0x10
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RF_CSR_CFG 0x0500
+#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR BIT(30)
+#define MT_RF_CSR_CFG_KICK BIT(31)
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_FCE_SKIP_FS 0x0a6c
+
+#define MT_MAC_CSR0 0x1000
+
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
+
+#define MT_BBP_CSR_CFG 0x101c
+#define MT_BBP_CSR_CFG_VAL GENMASK(7, 0)
+#define MT_BBP_CSR_CFG_REG_NUM GENMASK(15, 8)
+#define MT_BBP_CSR_CFG_READ BIT(16)
+#define MT_BBP_CSR_CFG_BUSY BIT(17)
+#define MT_BBP_CSR_CFG_PAR_DUR BIT(18)
+#define MT_BBP_CSR_CFG_RW_MODE BIT(19)
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
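+/* Example of the addressing above: WCID 40 maps to MT_WCID_DROP_BASE + 4
+ * (word 1), bit 8.
+ */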
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TBTT_TIMER_CFG 0x1124
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+#define MT_TXOP_TRUN_EN GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
+#define MT_HT_FBK_CFG0 0x1354
+#define MT_HT_FBK_CFG1 0x1358
+#define MT_LG_FBK_CFG0 0x135c
+#define MT_LG_FBK_CFG1 0x1360
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_PROT_RATE GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS BIT(16)
+#define MT_PROT_CTRL_CTS2SELF BIT(17)
+#define MT_PROT_NAV_SHORT BIT(18)
+#define MT_PROT_NAV_LONG BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
+#define MT_PROT_RTS_THR_EN BIT(26)
+#define MT_PROT_RATE_CCK_11 0x0003
+#define MT_PROT_RATE_OFDM_6 0x4000
+#define MT_PROT_RATE_OFDM_24 0x4004
+#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
+ ~MT_PROT_TXOP_ALLOW_MM40 & \
+ ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN 0x13c0
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_AUTO_RSP_CFG 0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
+
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+
+#define MT_RX_PARSER_CFG 0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+
+#define MT_RX_STA_CNT0 0x1700
+#define MT_RX_STA_CNT1 0x1704
+#define MT_RX_STA_CNT2 0x1708
+#define MT_TX_STA_CNT0 0x170c
+#define MT_TX_STA_CNT1 0x1710
+#define MT_TX_STA_CNT2 0x1714
+
+/* Vendor driver defines content of the second word of STAT_FIFO as follows:
+ * MT_TX_STAT_FIFO_RATE GENMASK(26, 16)
+ * MT_TX_STAT_FIFO_ETXBF BIT(27)
+ * MT_TX_STAT_FIFO_SND BIT(28)
+ * MT_TX_STAT_FIFO_ITXBF BIT(29)
+ * However, tests show that b16-31 have the same layout as TXWI rate_ctl
+ * with the rate set to the rate at which the frame was ACKed.
+ */
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_PID_TYPE GENMASK(4, 1)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT 0x171c
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+
+#define MT_MPDU_DENSITY_CNT 0x1740
+
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
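
For illustration only (not part of the patch): the aggregation counters sit in two discontiguous banks, and the macro above picks the bank by index. A minimal stand-alone check of the address arithmetic, with the constants copied from the defines above:

    #include <assert.h>

    #define AGG_BASE0 0x1720
    #define AGG_BASE1 0x174c
    #define AGG_CNT(id) ((id) < 8 ? AGG_BASE0 + ((id) << 2) \
                                  : AGG_BASE1 + (((id) - 8) << 2))

    int main(void)
    {
            assert(AGG_CNT(3) == 0x172c); /* first bank           */
            assert(AGG_CNT(9) == 0x1750); /* 0x174c + 4 bytes     */
            return 0;
    }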
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
+#define MT_SKEY_0(_bss, _idx) \
+ (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
+#define MT_SKEY_1(_bss, _idx) \
+ (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
+#define MT_SKEY(_bss, _idx) \
+ ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) \
+ (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) \
+ (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) \
+ ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
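
Again for illustration (not part of the patch): the shared-key table is split across two banks, with bit 3 of the BSS index selecting the bank. A stand-alone sketch of the expected addresses, constants copied from the defines above:

    #include <assert.h>

    #define SKEY(bss, idx) (((bss) & 8) ? \
            0xb400 + (4 * ((bss) & 7) + (idx)) * 32 : \
            0xac00 + (4 * (bss) + (idx)) * 32)

    int main(void)
    {
            assert(SKEY(1, 2) == 0xacc0); /* BSS 0-7: first bank   */
            assert(SKEY(9, 2) == 0xb4c0); /* BSS 8-15: second bank */
            return 0;
    }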
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+enum mt76_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.c b/drivers/net/wireless/mediatek/mt7601u/trace.c
new file mode 100644
index 000000000000..8abdd3cd546d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/trace.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.h b/drivers/net/wireless/mediatek/mt7601u/trace.h
new file mode 100644
index 000000000000..289897300ef0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/trace.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__MT7601U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7601U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7601u.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7601u
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s "
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT "%04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_read,
+ TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_write,
+ TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(mt_submit_urb,
+ TP_PROTO(struct mt7601u_dev *dev, struct urb *u),
+ TP_ARGS(dev, u),
+ TP_STRUCT__entry(
+ DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = u->pipe;
+ __entry->len = u->transfer_buffer_length;
+ ),
+ TP_printk(DEV_PR_FMT "p:%08x len:%u",
+ DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({ \
+ struct urb u; \
+ u.pipe = __pipe; \
+ u.transfer_buffer_length = __len; \
+ trace_mt_submit_urb(__dev, &u); \
+})
+
+TRACE_EVENT(mt_mcu_msg_send,
+ TP_PROTO(struct mt7601u_dev *dev,
+ struct sk_buff *skb, u32 csum, bool resp),
+ TP_ARGS(dev, skb, csum, resp),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, info)
+ __field(u32, csum)
+ __field(bool, resp)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->info = *(u32 *)skb->data;
+ __entry->csum = csum;
+ __entry->resp = resp;
+ ),
+ TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+ DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt_vend_req,
+ TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t buflen, int ret),
+ TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+ __field(u16, val) __field(u16, offset) __field(void*, buf)
+ __field(int, buflen) __field(int, ret)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = pipe;
+ __entry->req = req;
+ __entry->req_type = req_type;
+ __entry->val = val;
+ __entry->offset = offset;
+ __entry->buf = buf;
+ __entry->buflen = buflen;
+ __entry->ret = ret;
+ ),
+ TP_printk(DEV_PR_FMT
+ "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+ DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+ __entry->req_type, __entry->val, __entry->offset,
+ !!__entry->buf, __entry->buflen)
+);
+
+TRACE_EVENT(ee_read,
+ TP_PROTO(struct mt7601u_dev *dev, int offset, u16 val),
+ TP_ARGS(dev, offset, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, o) __field(u16, v)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->o = offset;
+ __entry->v = val;
+ ),
+ TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, bank)
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ __entry->bank = bank;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_read,
+ TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_write,
+ TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_bbp_reg_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_read,
+ TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_write,
+ TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->val = val;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_simple_evt, temp_mode,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, read_temp,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, freq_cal_adjust,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(freq_cal_offset,
+ TP_PROTO(struct mt7601u_dev *dev, u8 phy_mode, s8 freq_off),
+ TP_ARGS(dev, phy_mode, freq_off),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, phy_mode)
+ __field(s8, freq_off)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->phy_mode = phy_mode;
+ __entry->freq_off = freq_off;
+ ),
+ TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+ DEV_PR_ARG, __entry->phy_mode, __entry->freq_off)
+);
+
+TRACE_EVENT(mt_rx,
+ TP_PROTO(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u32 f),
+ TP_ARGS(dev, rxwi, f),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt7601u_rxwi, rxwi)
+ __field(u32, fce_info)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->rxwi = *rxwi;
+ __entry->fce_info = f;
+ ),
+ TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x frag_sn:%04hx rate:%04hx "
+ "uknw:%02hhx z:%02hhx%02hhx%02hhx snr:%02hhx "
+ "ant:%02hhx gain:%02hhx freq_o:%02hhx "
+ "r:%08x ea:%08x fce:%08x", DEV_PR_ARG,
+ le32_to_cpu(__entry->rxwi.rxinfo),
+ le32_to_cpu(__entry->rxwi.ctl),
+ le16_to_cpu(__entry->rxwi.frag_sn),
+ le16_to_cpu(__entry->rxwi.rate),
+ __entry->rxwi.unknown,
+ __entry->rxwi.zero[0], __entry->rxwi.zero[1],
+ __entry->rxwi.zero[2],
+ __entry->rxwi.snr, __entry->rxwi.ant,
+ __entry->rxwi.gain, __entry->rxwi.freq_off,
+ __entry->rxwi.resv2, __entry->rxwi.expert_ant,
+ __entry->fce_info)
+);
+
+TRACE_EVENT(mt_tx,
+ TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct mt76_sta *sta, struct mt76_txwi *h),
+ TP_ARGS(dev, skb, sta, h),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76_txwi, h)
+ __field(struct sk_buff *, skb)
+ __field(struct mt76_sta *, sta)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->h = *h;
+ __entry->skb = skb;
+ __entry->sta = sta;
+ ),
+ TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
+ "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+ __entry->skb, __entry->sta,
+ le16_to_cpu(__entry->h.flags),
+ le16_to_cpu(__entry->h.rate_ctl),
+ __entry->h.ack_ctl, __entry->h.wcid,
+ le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt_tx_dma_done,
+ TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb),
+ TP_ARGS(dev, skb),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(struct sk_buff *, skb)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->skb = skb;
+ ),
+ TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt_tx_status_cleaned,
+ TP_PROTO(struct mt7601u_dev *dev, int cleaned),
+ TP_ARGS(dev, cleaned),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, cleaned)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cleaned = cleaned;
+ ),
+ TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt_tx_status,
+ TP_PROTO(struct mt7601u_dev *dev, u32 stat1, u32 stat2),
+ TP_ARGS(dev, stat1, stat2),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, stat1) __field(u32, stat2)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->stat1 = stat1;
+ __entry->stat2 = stat2;
+ ),
+ TP_printk(DEV_PR_FMT "%08x %08x",
+ DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt_rx_dma_aggr,
+ TP_PROTO(struct mt7601u_dev *dev, int cnt, bool paged),
+ TP_ARGS(dev, cnt, paged),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, cnt)
+ __field(bool, paged)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cnt = cnt;
+ __entry->paged = paged;
+ ),
+ TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+ DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, set_key,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(set_shared_key,
+ TP_PROTO(struct mt7601u_dev *dev, u8 vid, u8 key),
+ TP_ARGS(dev, vid, key),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, vid)
+ __field(u8, key)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->vid = vid;
+ __entry->key = key;
+ ),
+ TP_printk(DEV_PR_FMT "vid:%02hhx key:%02hhx",
+ DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
new file mode 100644
index 000000000000..0be2080ceab3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+
+enum mt76_txq_id {
+ MT_TXQ_VO = IEEE80211_AC_VO,
+ MT_TXQ_VI = IEEE80211_AC_VI,
+ MT_TXQ_BE = IEEE80211_AC_BE,
+ MT_TXQ_BK = IEEE80211_AC_BK,
+ MT_TXQ_PSD,
+ MT_TXQ_MCU,
+ __MT_TXQ_MAX
+};
+
+/* Hardware uses a mirrored order of queues, with Q3 having the highest priority */
+static u8 q2hwq(u8 q)
+{
+ return q ^ 0x3;
+}
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+ int qid = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ return q2hwq(qid);
+}
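
For illustration (not part of the patch), and assuming mac80211's AC numbering (IEEE80211_AC_VO = 0 through IEEE80211_AC_BK = 3), the XOR above produces the following mirror:

    #include <stdio.h>

    static unsigned q2hwq(unsigned q) { return q ^ 0x3; }

    int main(void)
    {
            const char *ac[] = { "VO", "VI", "BE", "BK" };
            unsigned q;

            for (q = 0; q < 4; q++)  /* prints VO->3 VI->2 BE->1 BK->0 */
                    printf("AC_%s (%u) -> hw queue %u\n", ac[q], q, q2hwq(q));
            return 0;
    }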
+
+/* Note: TX retry reporting is a bit broken.
+ * Retries are reported only once per AMPDU and often arrive a frame
+ * early, i.e. they are reported in the last status preceding the AMPDU.
+ * Apart from the fact that it's hard to know the length of the AMPDU
+ * (which is needed to know how many consecutive frames the retries
+ * should be applied to), if a status arrives early on a full FIFO it
+ * gets lost and the retries of the whole AMPDU become invisible.
+ * As a work-around, encode the desired rate in the PKT_ID of the TX
+ * descriptor and guess the retries based on that (every rate is tried
+ * once). The only downside is that for MCS0 we have to rely solely on
+ * transmission failures, as no retries can ever be reported there.
+ * Not having to read EXT_FIFO has the nice side effect of doubling the
+ * number of reports which can be fetched.
+ * Also, the vendor driver never uses the EXT_FIFO register, so it may
+ * be under-tested.
+ */
+static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
+{
+ u8 encoded = (rate + 1) + is_probe * 8;
+
+ /* Because PKT_ID 0 disables status reporting, only 15 values are
+ * available but 16 are needed (8 MCS rates * 2 to encode is_probe),
+ * so two rates have to be crammed together: MCS0 and MCS7 with
+ * is_probe set share PKT_ID 9.
+ */
+ if (is_probe && rate == 7)
+ return encoded - 7;
+
+ return encoded;
+}
+
+static void
+mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+ u8 req_rate = stat->pktid;
+ u8 eff_rate = stat->rate & 0x7;
+
+ req_rate -= 1;
+
+ if (req_rate > 7) {
+ stat->is_probe = true;
+ req_rate -= 8;
+
+ /* Decide between MCS0 and MCS7 which share pktid 9 */
+ if (!req_rate && eff_rate)
+ req_rate = 7;
+ }
+
+ stat->retry = req_rate - eff_rate;
+}
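
A stand-alone round trip of the PKT_ID scheme (illustration, not part of the patch), showing the MCS0/MCS7 probe collision on ID 9 that the decoder above resolves via the effective rate:

    #include <assert.h>
    #include <stdbool.h>

    static unsigned enc(unsigned rate, bool probe)
    {
            unsigned e = (rate + 1) + probe * 8;

            return (probe && rate == 7) ? e - 7 : e;  /* fold 16 -> 9 */
    }

    int main(void)
    {
            assert(enc(0, false) == 1);  /* MCS0           */
            assert(enc(7, false) == 8);  /* MCS7           */
            assert(enc(0, true) == 9);   /* MCS0 probe ... */
            assert(enc(7, true) == 9);   /* ... collides with MCS7 probe */
            /* decode: pktid 9 with a non-zero effective rate must have
             * been the MCS7 probe, since an MCS0 frame cannot be ACKed
             * above MCS0 */
            return 0;
    }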
+
+static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+ struct ieee80211_tx_info *info)
+{
+ int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+ skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+ if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+ mt76_remove_hdr_pad(skb);
+
+ skb_trim(skb, pkt_len);
+}
+
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ mt7601u_tx_skb_remove_dma_overhead(skb, info);
+
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(dev->hw, skb);
+}
+
+static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76_txwi) + 4;
+ if (hdr_len % 4)
+ need_head += 2;
+
+ return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+ int pkt_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct mt76_txwi *txwi;
+ unsigned long flags;
+ bool is_probe;
+ u32 pkt_id;
+ u16 rate_ctl;
+ u8 nss;
+
+ txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (rate->idx < 0 || !rate->count)
+ rate_ctl = wcid->tx_rate;
+ else
+ rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
+ MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density));
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->flags = 0;
+ }
+
+ txwi->wcid = wcid->idx;
+
+ is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+ pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
+ pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id);
+ txwi->len_ctl = cpu_to_le16(pkt_len);
+
+ return txwi;
+}
+
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt7601u_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ struct mt76_sta *msta = NULL;
+ struct mt76_wcid *wcid = dev->mon_wcid;
+ struct mt76_txwi *txwi;
+ int pkt_len = skb->len;
+ int hw_q = skb2q(skb);
+
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+ if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return;
+ }
+
+ if (sta) {
+ msta = (struct mt76_sta *) sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ wcid = &mvif->group_wcid;
+ }
+
+ txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+ if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
+ return;
+
+ trace_mt_tx(dev, skb, msta, txwi);
+}
+
+void mt7601u_tx_stat(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ stat_work.work);
+ struct mt76_tx_status stat;
+ unsigned long flags;
+ int cleaned = 0;
+
+ while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
+ stat = mt7601u_mac_fetch_tx_status(dev);
+ if (!stat.valid)
+ break;
+
+ mt7601u_tx_pktid_dec(dev, &stat);
+ mt76_send_tx_status(dev, &stat);
+
+ cleaned++;
+ }
+ trace_mt_tx_status_cleaned(dev, cleaned);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ if (cleaned)
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+ else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(20));
+ else
+ clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+ u32 val;
+
+ /* TODO: should we do funny things with the parameters?
+ * See what mt7601u_set_default_edca() used to do in init.c.
+ */
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ WARN_ON(params->txop > 0xff);
+ WARN_ON(params->aifs > 0xf);
+ WARN_ON(cw_min > 0xf);
+ WARN_ON(cw_max > 0xf);
+
+ val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
+ MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) |
+ MT76_SET(MT_EDCA_CFG_CWMAX, cw_max);
+ /* TODO: based on the user-controlled EnableTxBurst variable the vendor
+ * driver sets a really long TXOP on AC0 (see connect.c:2009), but
+ * apparently only while connected? When not connected it should be 0.
+ */
+ if (!hw_q)
+ val |= 0x60;
+ else
+ val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop);
+ mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
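
For illustration (not part of the patch): fls() converts the (2^n - 1)-style contention window handed in by mac80211 into the exponent form the EDCA registers store. A portable stand-in:

    #include <assert.h>

    static int fls_(unsigned v)
    {
            int n = 0;

            while (v) {
                    n++;
                    v >>= 1;
            }
            return n;
    }

    int main(void)
    {
            assert(fls_(15) == 4);     /* CWmin 15   -> exponent 4  */
            assert(fls_(1023) == 10);  /* CWmax 1023 -> exponent 10 */
            return 0;
    }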
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
new file mode 100644
index 000000000000..99e2b3997bfa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7601u.h"
+#include "usb.h"
+#include "trace.h"
+
+static struct usb_device_id mt7601u_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x17d3) },
+ { USB_DEVICE(0x0e8d, 0x760a) },
+ { USB_DEVICE(0x0e8d, 0x760b) },
+ { USB_DEVICE(0x13d3, 0x3431) },
+ { USB_DEVICE(0x13d3, 0x3434) },
+ { USB_DEVICE(0x148f, 0x7601) },
+ { USB_DEVICE(0x148f, 0x760a) },
+ { USB_DEVICE(0x148f, 0x760b) },
+ { USB_DEVICE(0x148f, 0x760c) },
+ { USB_DEVICE(0x148f, 0x760d) },
+ { USB_DEVICE(0x2001, 0x3d04) },
+ { USB_DEVICE(0x2717, 0x4106) },
+ { USB_DEVICE(0x2955, 0x0001) },
+ { USB_DEVICE(0x2955, 0x1001) },
+ { USB_DEVICE(0x2a5f, 0x1000) },
+ { USB_DEVICE(0x7392, 0x7710) },
+ { 0, }
+};
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+ struct mt7601u_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+ buf->len = len;
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+ return !buf->urb || !buf->buf;
+}
+
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+ usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+ usb_free_urb(buf->urb);
+}
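
A hypothetical caller, sketched here for illustration (not part of the patch and not compilable stand-alone): mt7601u_usb_alloc_buf() returns true on failure rather than an errno, and does not roll back a partial allocation, so the buffer should be freed unconditionally on error; usb_free_urb() and usb_free_coherent() tolerate NULL halves.

    struct mt7601u_dma_buf buf;

    if (mt7601u_usb_alloc_buf(dev, 1024, &buf)) {
            /* safe: frees whichever half was actually allocated */
            mt7601u_usb_free_buf(dev, &buf);
            return -ENOMEM;
    }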
+
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+ struct mt7601u_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ unsigned pipe;
+ int ret;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]);
+ else
+ pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]);
+
+ usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+ complete_fn, context);
+ buf->urb->transfer_dma = buf->dma;
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ trace_mt_submit_urb(dev, buf->urb);
+ ret = usb_submit_urb(buf->urb, gfp);
+ if (ret)
+ dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+ dir, ep_idx, ret);
+ return ret;
+}
+
+void mt7601u_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+
+static int
+__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
+{
+ int i, ret;
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ const unsigned int pipe = (direction == USB_DIR_IN) ?
+ usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ ret = usb_control_msg(usb_dev, pipe, req, req_type,
+ val, offset, buf, buflen,
+ MT_VEND_REQ_TOUT_MS);
+ trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
+ buf, buflen, ret);
+
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+
+ msleep(5);
+ }
+
+ dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+
+ return ret;
+}
+
+int
+mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
+{
+ int ret;
+
+ mutex_lock(&dev->vendor_req_mutex);
+
+ ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
+ buf, buflen);
+ if (ret == -ENODEV)
+ set_bit(MT7601U_STATE_REMOVED, &dev->state);
+
+ mutex_unlock(&dev->vendor_req_mutex);
+
+ return ret;
+}
+
+void mt7601u_vendor_reset(struct mt7601u_dev *dev)
+{
+ mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
+{
+ int ret;
+ __le32 reg;
+ u32 val;
+
+ WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+ ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
+ 0, offset, &reg, sizeof(reg));
+ val = le32_to_cpu(reg);
+ if (ret > 0 && ret != sizeof(reg)) {
+ dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+ ret, offset);
+ val = ~0;
+ }
+
+ trace_reg_read(dev, offset, val);
+ return val;
+}
+
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ int ret;
+
+ ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+ val & 0xffff, offset, NULL, 0);
+ if (ret)
+ return ret;
+ return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+ val >> 16, offset + 2, NULL, 0);
+}
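
For illustration (not part of the patch): the 32-bit value is split into two 16-bit vendor control requests, low half first at the register offset, high half at offset + 2:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t val = 0xdeadbeef;
            uint16_t off = 0x1004;

            assert((val & 0xffff) == 0xbeef);  /* first request @ 0x1004  */
            assert((val >> 16) == 0xdead);     /* second request @ 0x1006 */
            assert(off + 2 == 0x1006);
            return 0;
    }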
+
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+ mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val);
+ trace_reg_write(dev, offset, val);
+}
+
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt7601u_rr(dev, offset) & ~mask;
+ mt7601u_wr(dev, offset, val);
+ return val;
+}
+
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ u32 reg = mt7601u_rr(dev, offset);
+
+ val |= reg & ~mask;
+ if (reg != val)
+ mt7601u_wr(dev, offset, val);
+ return val;
+}
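
The mask/merge step of the read-modify-check helper above, modelled stand-alone for illustration (not part of the patch); the point of mt7601u_rmc() is to skip the slow USB write when the merged value matches what the register already holds:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t reg = 0x00ff00ff;   /* current register value    */
            uint32_t mask = 0x0000ff00;  /* field to update           */
            uint32_t val = 0x00001200;   /* new field value, in place */
            uint32_t merged = val | (reg & ~mask);

            assert(merged == 0x00ff12ff);  /* only the field changed */
            return 0;
    }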
+
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+ WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+ mt7601u_burst_write_regs(dev, offset, data, len / 4);
+}
+
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr)
+{
+ mt7601u_wr(dev, offset, get_unaligned_le32(addr));
+ mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt7601u_assign_pipes(struct usb_interface *usb_intf,
+ struct mt7601u_dev *dev)
+{
+ struct usb_endpoint_descriptor *ep_desc;
+ struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+ unsigned i, ep_i = 0, ep_o = 0;
+
+ BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX);
+ BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX);
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ ep_i++ < __MT_EP_IN_MAX) {
+ dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc);
+ dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+ /* Note: this is ignored by the USB subsystem, but the vendor
+ * code does it. We can drop this at some point.
+ */
+ dev->in_eps[ep_i - 1] |= USB_DIR_IN;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ ep_o++ < __MT_EP_OUT_MAX) {
+ dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc);
+ dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+ }
+ }
+
+ if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+ dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n",
+ ep_i, ep_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mt7601u_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+ struct mt7601u_dev *dev;
+ u32 asic_rev, mac_rev;
+ int ret;
+
+ dev = mt7601u_alloc_device(&usb_intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ usb_dev = usb_get_dev(usb_dev);
+ usb_reset_device(usb_dev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ ret = mt7601u_assign_pipes(usb_intf, dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION);
+ mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
+ dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
+ asic_rev, mac_rev);
+
+ /* Note: vendor driver skips this check for MT7601U */
+ if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+ dev_warn(dev->dev, "Warning: eFUSE not present\n");
+
+ ret = mt7601u_init_hardware(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_register_device(dev);
+ if (ret)
+ goto err_hw;
+
+ set_bit(MT7601U_STATE_INITIALIZED, &dev->state);
+
+ return 0;
+err_hw:
+ mt7601u_cleanup(dev);
+err:
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->hw);
+ return ret;
+}
+
+static void mt7601u_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+ ieee80211_unregister_hw(dev->hw);
+ mt7601u_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->hw);
+}
+
+static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+ struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+ mt7601u_cleanup(dev);
+
+ return 0;
+}
+
+static int mt7601u_resume(struct usb_interface *usb_intf)
+{
+ struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+ return mt7601u_init_hardware(dev);
+}
+
+MODULE_DEVICE_TABLE(usb, mt7601u_device_table);
+MODULE_FIRMWARE(MT7601U_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt7601u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7601u_device_table,
+ .probe = mt7601u_probe,
+ .disconnect = mt7601u_disconnect,
+ .suspend = mt7601u_suspend,
+ .resume = mt7601u_resume,
+ .reset_resume = mt7601u_resume,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7601u_driver);
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h
new file mode 100644
index 000000000000..49e188fa3798
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_USB_H
+#define __MT7601U_USB_H
+
+#include "mt7601u.h"
+
+#define MT7601U_FIRMWARE "mt7601u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+#define MT_VEND_DEV_MODE_RESET 1
+
+enum mt_vendor_req {
+ MT_VEND_DEV_MODE = 1,
+ MT_VEND_WRITE = 2,
+ MT_VEND_MULTI_READ = 7,
+ MT_VEND_WRITE_FCE = 0x42,
+};
+
+enum mt_usb_ep_in {
+ MT_EP_IN_PKT_RX,
+ MT_EP_IN_CMD_RESP,
+ __MT_EP_IN_MAX,
+};
+
+enum mt_usb_ep_out {
+ MT_EP_OUT_INBAND_CMD,
+ MT_EP_OUT_AC_BK,
+ MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_VI,
+ MT_EP_OUT_AC_VO,
+ MT_EP_OUT_HCCA,
+ __MT_EP_OUT_MAX,
+};
+
+static inline struct usb_device *mt7601u_to_usb_dev(struct mt7601u_dev *mt7601u)
+{
+ return interface_to_usbdev(to_usb_interface(mt7601u->dev));
+}
+
+static inline bool mt7601u_urb_has_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ENOENT &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN;
+}
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+ struct mt7601u_dma_buf *buf);
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf);
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+ struct mt7601u_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+void mt7601u_complete_urb(struct urb *urb);
+
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen);
+void mt7601u_vendor_reset(struct mt7601u_dev *dev);
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.c b/drivers/net/wireless/mediatek/mt7601u/util.c
new file mode 100644
index 000000000000..7c1787c1ddcd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/util.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+
+void mt76_remove_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ memmove(skb->data + 2, skb->data, len);
+ skb_pull(skb, 2);
+}
+
+int mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+ int ret;
+
+ if (len % 4 == 0)
+ return 0;
+
+ ret = skb_cow(skb, 2);
+ if (ret)
+ return ret;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 0;
+}
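
For illustration (not part of the patch): the two helpers implement the rule that an 802.11 header whose length is not a multiple of 4 gets 2 bytes of padding between header and payload, keeping the payload 4-byte aligned. The header lengths below are the usual 802.11 values, assumed here:

    #include <assert.h>

    static int pad_needed(int hdr_len) { return hdr_len % 4 ? 2 : 0; }

    int main(void)
    {
            assert(pad_needed(24) == 0);  /* plain data frame */
            assert(pad_needed(26) == 2);  /* QoS data frame   */
            assert(pad_needed(30) == 2);  /* 4-address frame  */
            return 0;
    }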
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.h b/drivers/net/wireless/mediatek/mt7601u/util.h
new file mode 100644
index 000000000000..b89140bf1210
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/util.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+/*
+ * Power-of-two check: this verifies that the mask that has been given
+ * contains a contiguous set of bits.
+ * Note that we cannot use the is_power_of_2() function since this
+ * check must be done at compile-time.
+ */
+#define is_power_of_two(x) ( !((x) & ((x)-1)) )
+#define low_bit_mask(x) ( ((x)-1) & ~(x) )
+#define is_valid_mask(x) is_power_of_two(1LU + (x) + low_bit_mask(x))
+
+/*
+ * Macros to find the first set bit in a variable.
+ * These macros behave the same as the __ffs() functions, the most
+ * important difference being that the result is computed at
+ * compile time rather than run time.
+ */
+#define compile_ffs2(__x) \
+ __builtin_choose_expr(((__x) & 0x1), 0, 1)
+
+#define compile_ffs4(__x) \
+ __builtin_choose_expr(((__x) & 0x3), \
+ (compile_ffs2((__x))), \
+ (compile_ffs2((__x) >> 2) + 2))
+
+#define compile_ffs8(__x) \
+ __builtin_choose_expr(((__x) & 0xf), \
+ (compile_ffs4((__x))), \
+ (compile_ffs4((__x) >> 4) + 4))
+
+#define compile_ffs16(__x) \
+ __builtin_choose_expr(((__x) & 0xff), \
+ (compile_ffs8((__x))), \
+ (compile_ffs8((__x) >> 8) + 8))
+
+#define compile_ffs32(__x) \
+ __builtin_choose_expr(((__x) & 0xffff), \
+ (compile_ffs16((__x))), \
+ (compile_ffs16((__x) >> 16) + 16))
+
+/*
+ * This macro will check the requirements for the FIELD{8,16,32} macros
+ * The mask should be a constant non-zero contiguous set of bits which
+ * does not exceed the given typelimit.
+ */
+#define FIELD_CHECK(__mask) \
+ BUILD_BUG_ON(!(__mask) || !is_valid_mask(__mask))
+
+#define MT76_SET(_mask, _val) \
+ ({ \
+ FIELD_CHECK(_mask); \
+ (((u32) (_val)) << compile_ffs32(_mask)) & _mask; \
+ })
+
+#define MT76_GET(_mask, _val) \
+ ({ \
+ FIELD_CHECK(_mask); \
+ (u32) (((_val) & _mask) >> compile_ffs32(_mask)); \
+ })
+
+#endif
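
For illustration (not part of the patch): MT76_SET()/MT76_GET() pack and unpack a field given only its mask, with compile_ffs32() supplying the shift at compile time. A plain-C run-time equivalent using the GCC/Clang __builtin_ctz():

    #include <assert.h>
    #include <stdint.h>

    static uint32_t set_field(uint32_t mask, uint32_t val)
    {
            return (val << __builtin_ctz(mask)) & mask;
    }

    static uint32_t get_field(uint32_t mask, uint32_t reg)
    {
            return (reg & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
            uint32_t mask = 0x0000ff00;  /* a GENMASK(15, 8)-style field */
            uint32_t reg = set_field(mask, 0x5a);

            assert(reg == 0x5a00);
            assert(get_field(mask, reg) == 0x5a);
            return 0;
    }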
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
index 3ab87a855122..65cd461c88db 100644
--- a/drivers/net/wireless/mwifiex/11h.c
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -134,8 +134,8 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work)
chandef = priv->dfs_chandef;
if (priv->wdev.cac_started) {
- dev_dbg(priv->adapter->dev,
- "CAC timer finished; No radar detected\n");
+ mwifiex_dbg(priv->adapter, MSG,
+ "CAC timer finished; No radar detected\n");
cfg80211_cac_event(priv->netdev, &chandef,
NL80211_RADAR_CAC_FINISHED,
GFP_KERNEL);
@@ -161,9 +161,9 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
cr_req->chan_desc.chan_width = radar_params->chandef->width;
cr_req->msec_dwell_time = cpu_to_le32(radar_params->cac_time_ms);
- dev_dbg(priv->adapter->dev,
- "11h: issuing DFS Radar check for channel=%d\n",
- radar_params->chandef->chan->hw_value);
+ mwifiex_dbg(priv->adapter, MSG,
+ "11h: issuing DFS Radar check for channel=%d\n",
+ radar_params->chandef->chan->hw_value);
return 0;
}
@@ -174,8 +174,8 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
void mwifiex_abort_cac(struct mwifiex_private *priv)
{
if (priv->wdev.cac_started) {
- dev_dbg(priv->adapter->dev,
- "Aborting delayed work for CAC.\n");
+ mwifiex_dbg(priv->adapter, MSG,
+ "Aborting delayed work for CAC.\n");
cancel_delayed_work_sync(&priv->dfs_cac_work);
cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
@@ -199,7 +199,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
sizeof(u32));
if (le32_to_cpu(rpt_event->result) != HostCmd_RESULT_OK) {
- dev_err(priv->adapter->dev, "Error in channel report event\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Error in channel report event\n");
return -1;
}
@@ -212,8 +213,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
switch (le16_to_cpu(rpt->header.type)) {
case TLV_TYPE_CHANRPT_11H_BASIC:
if (rpt->map.radar) {
- dev_notice(priv->adapter->dev,
- "RADAR Detected on channel %d!\n",
+ mwifiex_dbg(priv->adapter, MSG,
+ "RADAR Detected on channel %d!\n",
priv->dfs_chandef.chan->hw_value);
cancel_delayed_work_sync(&priv->dfs_cac_work);
cfg80211_cac_event(priv->netdev,
@@ -242,16 +243,17 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
rdr_event = (void *)(skb->data + sizeof(u32));
if (le32_to_cpu(rdr_event->passed)) {
- dev_notice(priv->adapter->dev,
- "radar detected; indicating kernel\n");
+ mwifiex_dbg(priv->adapter, MSG,
+ "radar detected; indicating kernel\n");
cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
GFP_KERNEL);
- dev_dbg(priv->adapter->dev, "regdomain: %d\n",
- rdr_event->reg_domain);
- dev_dbg(priv->adapter->dev, "radar detection type: %d\n",
- rdr_event->det_type);
+ mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
+ rdr_event->reg_domain);
+ mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
+ rdr_event->det_type);
} else {
- dev_dbg(priv->adapter->dev, "false radar detection event!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "false radar detection event!\n");
}
return 0;
@@ -276,20 +278,20 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
bss_cfg = &priv->bss_cfg;
if (!bss_cfg->beacon_period) {
- dev_err(priv->adapter->dev,
- "channel switch: AP already stopped\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "channel switch: AP already stopped\n");
return;
}
mwifiex_uap_set_channel(bss_cfg, priv->dfs_chandef);
if (mwifiex_config_start_uap(priv, bss_cfg)) {
- dev_dbg(priv->adapter->dev,
- "Failed to start AP after channel switch\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to start AP after channel switch\n");
return;
}
- dev_notice(priv->adapter->dev,
- "indicating channel switch completion to kernel\n");
+ mwifiex_dbg(priv->adapter, MSG,
+ "indicating channel switch completion to kernel\n");
cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
}
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 433bd6837c79..8422986cd7a9 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -42,7 +42,7 @@ int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
priv->wdev.wiphy->bands[radio_type];
if (WARN_ON_ONCE(!sband)) {
- dev_err(priv->adapter->dev, "Invalid radio type!\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Invalid radio type!\n");
return -EINVAL;
}
@@ -184,7 +184,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr);
if (tx_ba_tbl) {
- dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
+ mwifiex_dbg(priv->adapter, EVENT, "info: BA stream complete\n");
tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
priv->add_ba_param.tx_amsdu &&
@@ -197,7 +197,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
ra_list->ba_status = BA_SETUP_COMPLETE;
}
} else {
- dev_err(priv->adapter->dev, "BA stream not created\n");
+ mwifiex_dbg(priv->adapter, ERROR, "BA stream not created\n");
}
return 0;
@@ -224,7 +224,8 @@ int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
tx_buf->action = cpu_to_le16(action);
switch (action) {
case HostCmd_ACT_GEN_SET:
- dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", *buf_size);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: set tx_buf=%d\n", *buf_size);
tx_buf->buff_size = cpu_to_le16(*buf_size);
break;
case HostCmd_ACT_GEN_GET:
@@ -466,7 +467,8 @@ void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl))
return;
- dev_dbg(priv->adapter->dev, "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
list_del(&tx_ba_tsr_tbl->list);
@@ -563,7 +565,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
unsigned long flags;
u16 block_ack_param_set;
- dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
+ mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid);
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
@@ -575,9 +577,9 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
if (!sta_ptr) {
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- dev_warn(priv->adapter->dev,
- "BA setup with unknown TDLS peer %pM!\n",
- peer_mac);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "BA setup with unknown TDLS peer %pM!\n",
+ peer_mac);
return -1;
}
if (sta_ptr->is_11ac_enabled)
@@ -706,8 +708,8 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
- dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
- __func__, rx_reo_tbl->tid);
+ mwifiex_dbg(priv->adapter, DATA, "data: %s tid=%d\n",
+ __func__, rx_reo_tbl->tid);
memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu;
rx_reo_tbl++;
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 6183e255e62a..f7c717253a66 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -187,7 +187,6 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
GFP_ATOMIC | GFP_DMA);
if (!skb_aggr) {
- dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
return -1;
@@ -297,13 +296,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
- dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
break;
case -1:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
- dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
- __func__, ret);
+ mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n",
+ __func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
return 0;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index f75f8acfaca0..39d7a957674c 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -51,8 +51,8 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
rx_skb = __skb_dequeue(&list);
ret = mwifiex_recv_packet(priv, rx_skb);
if (ret == -1)
- dev_err(priv->adapter->dev,
- "Rx of A-MSDU failed");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Rx of A-MSDU failed");
}
return 0;
}
@@ -304,7 +304,7 @@ mwifiex_flush_data(unsigned long context)
if (seq_num < 0)
return;
- dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
+ mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
start_win);
@@ -367,8 +367,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
}
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- dev_dbg(priv->adapter->dev, "info: last_seq=%d start_win=%d\n",
- last_seq, new_node->start_win);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: last_seq=%d start_win=%d\n",
+ last_seq, new_node->start_win);
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
last_seq >= new_node->start_win) {
@@ -382,8 +383,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
GFP_KERNEL);
if (!new_node->rx_reorder_ptr) {
kfree((u8 *) new_node);
- dev_err(priv->adapter->dev,
- "%s: failed to alloc reorder_ptr\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: failed to alloc reorder_ptr\n", __func__);
return;
}
@@ -467,9 +468,9 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
cmd_addba_req->peer_mac_addr);
if (!sta_ptr) {
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- dev_warn(priv->adapter->dev,
- "BA setup with unknown TDLS peer %pM!\n",
- cmd_addba_req->peer_mac_addr);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "BA setup with unknown TDLS peer %pM!\n",
+ cmd_addba_req->peer_mac_addr);
return -1;
}
if (sta_ptr->is_11ac_enabled)
@@ -573,14 +574,14 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
}
if (tbl->flags & RXREOR_FORCE_NO_DROP) {
- dev_dbg(priv->adapter->dev,
- "RXREOR_FORCE_NO_DROP when HS is activated\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "RXREOR_FORCE_NO_DROP when HS is activated\n");
tbl->flags &= ~RXREOR_FORCE_NO_DROP;
} else if (init_window_shift && seq_num < start_win &&
seq_num >= tbl->init_win) {
- dev_dbg(priv->adapter->dev,
- "Sender TID sequence number reset %d->%d for SSN %d\n",
- start_win, seq_num, tbl->init_win);
+ mwifiex_dbg(priv->adapter, INFO,
+ "Sender TID sequence number reset %d->%d for SSN %d\n",
+ start_win, seq_num, tbl->init_win);
tbl->start_win = start_win = seq_num;
end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
} else {
@@ -668,23 +669,23 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
else
cleanup_rx_reorder_tbl = (initiator) ? false : true;
- dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n",
- peer_mac, tid, initiator);
+ mwifiex_dbg(priv->adapter, EVENT, "event: DELBA: %pM tid=%d initiator=%d\n",
+ peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
if (!tbl) {
- dev_dbg(priv->adapter->dev,
- "event: TID, TA not found in table\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: TID, TA not found in table\n");
return;
}
mwifiex_del_rx_reorder_entry(priv, tbl);
} else {
ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
- dev_dbg(priv->adapter->dev,
- "event: TID, RA not found in table\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: TID, RA not found in table\n");
return;
}
ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac);
@@ -721,8 +722,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
* the stream
*/
if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
- dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
- add_ba_rsp->peer_mac_addr, tid);
+ mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
+ add_ba_rsp->peer_mac_addr, tid);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
@@ -746,8 +747,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
tbl->amsdu = false;
}
- dev_dbg(priv->adapter->dev,
- "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
return 0;
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index aa01c9bc77f9..48edf387683e 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -12,6 +12,7 @@ config MWIFIEX_SDIO
tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
depends on MWIFIEX && MMC
select FW_LOADER
+ select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
8786/8787/8797/8887/8897 chipsets with SDIO interface.
@@ -23,6 +24,7 @@ config MWIFIEX_PCIE
tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
depends on MWIFIEX && PCI
select FW_LOADER
+ select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
8766/8897 chipsets with PCIe interface.
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index 31928caeeed2..2f0f9b5609d0 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -230,9 +230,9 @@ getlog
cat getlog
-fw_dump
- This command is used to dump firmware memory into files.
- Separate file will be created for each memory segment.
+device_dump
+ This command is used to dump driver information and firmware memory
+ segments.
Usage:
cat fw_dump
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index bf9020ff2d33..4eecedadefbf 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -104,11 +104,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
- wiphy_err(wiphy, "deleting the crypto keys\n");
+ mwifiex_dbg(priv->adapter, ERROR, "deleting the crypto keys\n");
return -EFAULT;
}
- wiphy_dbg(wiphy, "info: crypto keys deleted\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: crypto keys deleted\n");
return 0;
}
@@ -163,7 +163,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
if (!buf || !len) {
- wiphy_err(wiphy, "invalid buffer and length\n");
+ mwifiex_dbg(priv->adapter, ERROR, "invalid buffer and length\n");
return -EFAULT;
}
@@ -172,8 +172,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
ieee80211_is_probe_resp(mgmt->frame_control)) {
/* Since we support offload probe resp, we need to skip probe
* resp in AP or GO mode */
- wiphy_dbg(wiphy,
- "info: skip to send probe resp in AP or GO mode\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: skip to send probe resp in AP or GO mode\n");
return 0;
}
@@ -183,7 +183,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
pkt_len + sizeof(pkt_len));
if (!skb) {
- wiphy_err(wiphy, "allocate skb failed for management frame\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "allocate skb failed for management frame\n");
return -ENOMEM;
}
@@ -206,7 +207,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
mwifiex_queue_tx_pkt(priv, skb);
- wiphy_dbg(wiphy, "info: management frame transmitted\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: management frame transmitted\n");
return 0;
}
@@ -231,7 +232,7 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
HostCmd_ACT_GEN_SET, 0,
&priv->mgmt_frame_mask, false);
- wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: mgmt frame registered\n");
}
}
@@ -248,13 +249,14 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
int ret;
if (!chan || !cookie) {
- wiphy_err(wiphy, "Invalid parameter for ROC\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Invalid parameter for ROC\n");
return -EINVAL;
}
if (priv->roc_cfg.cookie) {
- wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llx\n",
- priv->roc_cfg.cookie);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ongoing ROC, cookie = 0x%llx\n",
+ priv->roc_cfg.cookie);
return -EBUSY;
}
@@ -269,7 +271,8 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
cfg80211_ready_on_channel(wdev, *cookie, chan,
duration, GFP_ATOMIC);
- wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ROC, cookie = 0x%llx\n", *cookie);
}
return ret;
@@ -298,7 +301,8 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
- wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: cancel ROC, cookie = 0x%llx\n", cookie);
}
return ret;
@@ -344,8 +348,8 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
u32 ps_mode;
if (timeout)
- wiphy_dbg(wiphy,
- "info: ignore timeout value for IEEE Power Save\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ignore timeout value for IEEE Power Save\n");
ps_mode = enabled;
@@ -370,7 +374,7 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
priv->wep_key_curr_index = key_index;
} else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
NULL, 0)) {
- wiphy_err(wiphy, "set default Tx key index\n");
+ mwifiex_dbg(priv->adapter, ERROR, "set default Tx key index\n");
return -EFAULT;
}
@@ -407,7 +411,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
if (mwifiex_set_encode(priv, params, params->key, params->key_len,
key_index, peer_mac, 0)) {
- wiphy_err(wiphy, "crypto keys added\n");
+ mwifiex_dbg(priv->adapter, ERROR, "crypto keys added\n");
return -EFAULT;
}
@@ -442,7 +446,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
band = mwifiex_band_to_radio_type(adapter->config_bands);
if (!wiphy->bands[band]) {
- wiphy_err(wiphy, "11D: setting domain info in FW\n");
+ mwifiex_dbg(adapter, ERROR,
+ "11D: setting domain info in FW\n");
return -1;
}
@@ -493,7 +498,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
HostCmd_ACT_GEN_SET, 0, NULL, false)) {
- wiphy_err(wiphy, "11D: setting domain info in FW\n");
+ mwifiex_dbg(adapter, ERROR,
+ "11D: setting domain info in FW\n");
return -1;
}
@@ -516,9 +522,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv = mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY);
-
- wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n",
- request->alpha2[0], request->alpha2[1]);
+ mwifiex_dbg(adapter, INFO,
+ "info: cfg80211 regulatory domain callback for %c%c\n",
+ request->alpha2[0], request->alpha2[1]);
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
@@ -527,8 +533,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
break;
default:
- wiphy_err(wiphy, "unknown regdom initiator: %d\n",
- request->initiator);
+ mwifiex_dbg(adapter, ERROR,
+ "unknown regdom initiator: %d\n",
+ request->initiator);
return;
}
@@ -597,8 +604,8 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
switch (priv->bss_role) {
case MWIFIEX_BSS_ROLE_UAP:
if (priv->bss_started) {
- dev_err(adapter->dev,
- "cannot change wiphy params when bss started");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot change wiphy params when bss started");
return -EINVAL;
}
@@ -622,15 +629,16 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
kfree(bss_cfg);
if (ret) {
- wiphy_err(wiphy, "Failed to set wiphy phy params\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to set wiphy phy params\n");
return ret;
}
break;
case MWIFIEX_BSS_ROLE_STA:
if (priv->media_connected) {
- dev_err(adapter->dev,
- "cannot change wiphy params when connected");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot change wiphy params when connected");
return -EINVAL;
}
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -724,8 +732,8 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
HostCmd_ACT_GEN_SET, 0,
&priv->mgmt_frame_mask, false)) {
- dev_warn(priv->adapter->dev,
- "could not unregister mgmt frame rx\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not unregister mgmt frame rx\n");
return -1;
}
@@ -789,9 +797,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
break;
default:
- dev_err(priv->adapter->dev,
- "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
@@ -824,12 +832,13 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
if (adapter->curr_iface_comb.p2p_intf ==
adapter->iface_limit.p2p_intf) {
- dev_err(adapter->dev,
- "cannot create multiple P2P ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple P2P ifaces\n");
return -1;
}
- dev_dbg(priv->adapter->dev, "%s: changing role to p2p\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "%s: changing role to p2p\n", dev->name);
if (mwifiex_deinit_priv_params(priv))
return -1;
@@ -846,9 +855,9 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
return -EFAULT;
break;
default:
- dev_err(priv->adapter->dev,
- "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
@@ -897,17 +906,17 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
curr_iftype != NL80211_IFTYPE_P2P_GO) &&
(adapter->curr_iface_comb.sta_intf ==
adapter->iface_limit.sta_intf)) {
- dev_err(adapter->dev,
- "cannot create multiple station/adhoc ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple station/adhoc ifaces\n");
return -1;
}
if (type == NL80211_IFTYPE_STATION)
- dev_notice(adapter->dev,
- "%s: changing role to station\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "%s: changing role to station\n", dev->name);
else
- dev_notice(adapter->dev,
- "%s: changing role to adhoc\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "%s: changing role to adhoc\n", dev->name);
if (mwifiex_deinit_priv_params(priv))
return -1;
@@ -954,12 +963,13 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
if (adapter->curr_iface_comb.uap_intf ==
adapter->iface_limit.uap_intf) {
- dev_err(adapter->dev,
- "cannot create multiple AP ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple AP ifaces\n");
return -1;
}
- dev_notice(adapter->dev, "%s: changing role to AP\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "%s: changing role to AP\n", dev->name);
if (mwifiex_deinit_priv_params(priv))
return -1;
@@ -1020,12 +1030,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
- wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: kept type as IBSS\n", dev->name);
case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
return 0;
default:
- wiphy_err(wiphy, "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
break;
@@ -1048,12 +1060,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
- wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: kept type as STA\n", dev->name);
case NL80211_IFTYPE_STATION: /* This shouldn't happen */
return 0;
default:
- wiphy_err(wiphy, "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
break;
@@ -1070,12 +1084,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
type, flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
- wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: kept type as AP\n", dev->name);
case NL80211_IFTYPE_AP: /* This shouldn't happen */
return 0;
default:
- wiphy_err(wiphy, "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
break;
@@ -1100,19 +1116,22 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
- wiphy_warn(wiphy, "%s: kept type as P2P\n", dev->name);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: kept type as P2P\n", dev->name);
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return 0;
default:
- wiphy_err(wiphy, "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
break;
default:
- wiphy_err(wiphy, "%s: unknown iftype: %d\n",
- dev->name, dev->ieee80211_ptr->iftype);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: unknown iftype: %d\n",
+ dev->name, dev->ieee80211_ptr->iftype);
return -EOPNOTSUPP;
}
@@ -1206,12 +1225,14 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
/* Get signal information from the firmware */
if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
HostCmd_ACT_GEN_GET, 0, NULL, true)) {
- dev_err(priv->adapter->dev, "failed to get signal information\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "failed to get signal information\n");
return -EFAULT;
}
if (mwifiex_drv_get_data_rate(priv, &rate)) {
- dev_err(priv->adapter->dev, "getting data rate\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "getting data rate error\n");
return -EFAULT;
}
@@ -1295,7 +1316,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
enum ieee80211_band band;
- dev_dbg(priv->adapter->dev, "dump_survey idx=%d\n", idx);
+ mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx);
memset(survey, 0, sizeof(struct survey_info));
@@ -1472,8 +1493,8 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
struct mwifiex_adapter *adapter = priv->adapter;
if (!priv->media_connected) {
- dev_err(adapter->dev,
- "Can not set Tx data rate in disconnected state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Can not set Tx data rate in disconnected state\n");
return -EINVAL;
}
@@ -1556,17 +1577,20 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
- wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: bss_type mismatched\n", __func__);
return -EINVAL;
}
if (!priv->bss_started) {
- wiphy_err(wiphy, "%s: bss not started\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: bss not started\n", __func__);
return -EINVAL;
}
if (mwifiex_set_mgmt_ies(priv, data)) {
- wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: setting mgmt ies failed\n", __func__);
return -EFAULT;
}
@@ -1594,7 +1618,8 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
if (!params->mac || is_broadcast_ether_addr(params->mac))
return 0;
- wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac);
+ mwifiex_dbg(priv->adapter, INFO, "%s: mac address %pM\n",
+ __func__, params->mac);
eth_zero_addr(deauth_mac);
@@ -1687,14 +1712,16 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
mwifiex_abort_cac(priv);
if (mwifiex_del_mgmt_ies(priv))
- wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to delete mgmt IEs!\n");
priv->ap_11n_enabled = 0;
memset(&priv->bss_cfg, 0, sizeof(priv->bss_cfg));
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL, true)) {
- wiphy_err(wiphy, "Failed to stop the BSS\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to stop the BSS\n");
return -1;
}
@@ -1756,7 +1783,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
kfree(bss_cfg);
- wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to parse secuirty parameters!\n");
return -1;
}
@@ -1778,17 +1806,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (mwifiex_is_11h_active(priv) &&
!cfg80211_chandef_dfs_required(wiphy, &params->chandef,
priv->bss_mode)) {
- dev_dbg(priv->adapter->dev, "Disable 11h extensions in FW\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "Disable 11h extensions in FW\n");
if (mwifiex_11h_activate(priv, false)) {
- dev_err(priv->adapter->dev,
- "Failed to disable 11h extensions!!");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to disable 11h extensions!!");
return -1;
}
priv->state_11h.is_11h_active = true;
}
if (mwifiex_config_start_uap(priv, bss_cfg)) {
- wiphy_err(wiphy, "Failed to start AP\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to start AP\n");
kfree(bss_cfg);
return -1;
}
@@ -1816,8 +1846,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
- wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
- " reason code %d\n", priv->cfg_bssid, reason_code);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: successfully disconnected from %pM:\t"
+ "reason code %d\n", priv->cfg_bssid, reason_code);
eth_zero_addr(priv->cfg_bssid);
priv->hs2_enabled = false;
@@ -1899,13 +1930,13 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
req_ssid.ssid_len = ssid_len;
if (ssid_len > IEEE80211_MAX_SSID_LEN) {
- dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
+ mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n");
return -EINVAL;
}
memcpy(req_ssid.ssid, ssid, ssid_len);
if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) {
- dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
+ mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n");
return -EINVAL;
}
@@ -1959,9 +1990,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
if (sme->key) {
if (mwifiex_is_alg_wep(priv->sec_info.encryption_mode)) {
- dev_dbg(priv->adapter->dev,
- "info: setting wep encryption"
- " with key len %d\n", sme->key_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: setting wep encryption\t"
+ "with key len %d\n", sme->key_len);
priv->wep_key_curr_index = sme->key_idx;
ret = mwifiex_set_encode(priv, NULL, sme->key,
sme->key_len, sme->key_idx,
@@ -1978,7 +2009,7 @@ done:
if (is_scanning_required) {
/* Do specific SSID scanning */
if (mwifiex_request_scan(priv, &req_ssid)) {
- dev_err(priv->adapter->dev, "scan error\n");
+ mwifiex_dbg(priv->adapter, ERROR, "scan error\n");
return -EFAULT;
}
}
@@ -1997,15 +2028,15 @@ done:
if (!bss) {
if (is_scanning_required) {
- dev_warn(priv->adapter->dev,
- "assoc: requested bss not found in scan results\n");
+ mwifiex_dbg(priv->adapter, WARN,
+ "assoc: requested bss not found in scan results\n");
break;
}
is_scanning_required = 1;
} else {
- dev_dbg(priv->adapter->dev,
- "info: trying to associate to '%s' bssid %pM\n",
- (char *) req_ssid.ssid, bss->bssid);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: trying to associate to '%s' bssid %pM\n",
+ (char *)req_ssid.ssid, bss->bssid);
memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
break;
}
@@ -2041,26 +2072,29 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
int ret;
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
- wiphy_err(wiphy,
- "%s: reject infra assoc request in non-STA role\n",
- dev->name);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: reject infra assoc request in non-STA role\n",
+ dev->name);
return -EINVAL;
}
if (priv->wdev.current_bss) {
- wiphy_warn(wiphy, "%s: already connected\n", dev->name);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: already connected\n", dev->name);
return -EALREADY;
}
if (adapter->surprise_removed || adapter->is_cmd_timedout) {
- wiphy_err(wiphy,
- "%s: Ignore connection. Card removed or FW in bad state\n",
- dev->name);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Ignore connection.\t"
+ "Card removed or FW in bad state\n",
+ dev->name);
return -EFAULT;
}
- wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
- (char *) sme->ssid, sme->bssid);
+ mwifiex_dbg(adapter, INFO,
+ "info: Trying to associate to %s and bssid %pM\n",
+ (char *)sme->ssid, sme->bssid);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
@@ -2068,17 +2102,17 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
NULL, 0, WLAN_STATUS_SUCCESS,
GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: associated to bssid %pM successfully\n",
- priv->cfg_bssid);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: associated to bssid %pM successfully\n",
+ priv->cfg_bssid);
if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->auto_tdls &&
priv->bss_type == MWIFIEX_BSS_TYPE_STA)
mwifiex_setup_auto_tdls_timer(priv);
} else {
- dev_dbg(priv->adapter->dev,
- "info: association to bssid %pM failed\n",
- priv->cfg_bssid);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: association to bssid %pM failed\n",
+ priv->cfg_bssid);
eth_zero_addr(priv->cfg_bssid);
if (ret > 0)
@@ -2105,7 +2139,6 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
struct cfg80211_ibss_params *params)
{
- struct wiphy *wiphy = priv->wdev.wiphy;
struct mwifiex_adapter *adapter = priv->adapter;
int index = 0, i;
u8 config_bands = 0;
@@ -2162,8 +2195,10 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
priv->adhoc_channel = ieee80211_frequency_to_channel(
params->chandef.chan->center_freq);
- wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n",
- config_bands, priv->adhoc_channel, adapter->sec_chan_offset);
+ mwifiex_dbg(adapter, INFO,
+ "info: set ibss band %d, chan %d, chan offset %d\n",
+ config_bands, priv->adhoc_channel,
+ adapter->sec_chan_offset);
return 0;
}
@@ -2182,13 +2217,15 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
int ret = 0;
if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
- wiphy_err(wiphy, "request to join ibss received "
- "when station is not in ibss mode\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "request to join ibss received\t"
+ "when station is not in ibss mode\n");
goto done;
}
- wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
- (char *) params->ssid, params->bssid);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: trying to join to %s and bssid %pM\n",
+ (char *)params->ssid, params->bssid);
mwifiex_set_ibss_params(priv, params);
@@ -2200,12 +2237,12 @@ done:
if (!ret) {
cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
params->chandef.chan, GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: joined/created adhoc network with bssid"
- " %pM successfully\n", priv->cfg_bssid);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: joined/created adhoc network with bssid\t"
+ "%pM successfully\n", priv->cfg_bssid);
} else {
- dev_dbg(priv->adapter->dev,
- "info: failed creating/joining adhoc network\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: failed creating/joining adhoc network\n");
}
return ret;
@@ -2222,8 +2259,8 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
- priv->cfg_bssid);
+ mwifiex_dbg(priv->adapter, MSG, "info: disconnecting from essid %pM\n",
+ priv->cfg_bssid);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
@@ -2250,13 +2287,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
struct ieee_types_header *ie;
struct mwifiex_user_scan_cfg *user_scan_cfg;
- wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
+ mwifiex_dbg(priv->adapter, CMD,
+ "info: received scan request on %s\n", dev->name);
/* Block scan request if scan operation or scan cleanup when interface
* is disabled is in process
*/
if (priv->scan_request || priv->scan_aborting) {
- dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
+ mwifiex_dbg(priv->adapter, WARN,
+ "cmd: Scan already in process..\n");
return -EBUSY;
}
@@ -2308,7 +2347,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
ret = mwifiex_scan_networks(priv, user_scan_cfg);
kfree(user_scan_cfg);
if (ret) {
- dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "scan failed: %d\n", ret);
priv->scan_aborting = false;
priv->scan_request = NULL;
return ret;
@@ -2454,15 +2494,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_ADHOC:
if (adapter->curr_iface_comb.sta_intf ==
adapter->iface_limit.sta_intf) {
- wiphy_err(wiphy,
- "cannot create multiple sta/adhoc ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple sta/adhoc ifaces\n");
return ERR_PTR(-EINVAL);
}
priv = mwifiex_get_unused_priv(adapter);
if (!priv) {
- wiphy_err(wiphy,
- "could not get free private struct\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not get free private struct\n");
return ERR_PTR(-EFAULT);
}
@@ -2484,15 +2524,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
if (adapter->curr_iface_comb.uap_intf ==
adapter->iface_limit.uap_intf) {
- wiphy_err(wiphy,
- "cannot create multiple AP ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple AP ifaces\n");
return ERR_PTR(-EINVAL);
}
priv = mwifiex_get_unused_priv(adapter);
if (!priv) {
- wiphy_err(wiphy,
- "could not get free private struct\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not get free private struct\n");
return ERR_PTR(-EFAULT);
}
@@ -2511,15 +2551,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_CLIENT:
if (adapter->curr_iface_comb.p2p_intf ==
adapter->iface_limit.p2p_intf) {
- wiphy_err(wiphy,
- "cannot create multiple P2P ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple P2P ifaces\n");
return ERR_PTR(-EINVAL);
}
priv = mwifiex_get_unused_priv(adapter);
if (!priv) {
- wiphy_err(wiphy,
- "could not get free private struct\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not get free private struct\n");
return ERR_PTR(-EFAULT);
}
@@ -2550,7 +2590,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
break;
default:
- wiphy_err(wiphy, "type not supported\n");
+ mwifiex_dbg(adapter, ERROR, "type not supported\n");
return ERR_PTR(-EINVAL);
}
@@ -2558,7 +2598,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
name_assign_type, ether_setup,
IEEE80211_NUM_ACS, 1);
if (!dev) {
- wiphy_err(wiphy, "no memory available for netdevice\n");
+ mwifiex_dbg(adapter, ERROR,
+ "no memory available for netdevice\n");
memset(&priv->wdev, 0, sizeof(priv->wdev));
priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2599,7 +2640,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
/* Register network device */
if (register_netdevice(dev)) {
- wiphy_err(wiphy, "cannot register virtual network device\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register virtual network device\n");
free_netdev(dev);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->netdev = NULL;
@@ -2613,7 +2655,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
WQ_MEM_RECLAIM |
WQ_UNBOUND, 1, name);
if (!priv->dfs_cac_workqueue) {
- wiphy_err(wiphy, "cannot register virtual network device\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register virtual network device\n");
free_netdev(dev);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->netdev = NULL;
@@ -2628,7 +2671,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1, name);
if (!priv->dfs_chan_sw_workqueue) {
- wiphy_err(wiphy, "cannot register virtual network device\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register virtual network device\n");
free_netdev(dev);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->netdev = NULL;
@@ -2642,7 +2686,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
sema_init(&priv->async_sem, 1);
- dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: Marvell 802.11 Adapter\n", dev->name);
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_init(priv);
@@ -2661,7 +2706,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
adapter->curr_iface_comb.p2p_intf++;
break;
default:
- wiphy_err(wiphy, "type not supported\n");
+ mwifiex_dbg(adapter, ERROR, "type not supported\n");
return ERR_PTR(-EINVAL);
}
@@ -2721,7 +2766,8 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
adapter->curr_iface_comb.p2p_intf++;
break;
default:
- dev_err(adapter->dev, "del_virtual_intf: type not supported\n");
+ mwifiex_dbg(adapter, ERROR,
+ "del_virtual_intf: type not supported\n");
break;
}
@@ -2839,7 +2885,8 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
byte_seq,
MWIFIEX_MEF_MAX_BYTESEQ)) {
- dev_err(priv->adapter->dev, "Pattern not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Pattern not supported\n");
kfree(mef_entry);
return -EOPNOTSUPP;
}
@@ -2954,21 +3001,22 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
mwifiex_cancel_all_pending_cmd(adapter);
if (!wowlan) {
- dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
+ mwifiex_dbg(adapter, ERROR,
+ "None of the WOWLAN triggers enabled\n");
return 0;
}
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
if (!priv->media_connected) {
- dev_warn(adapter->dev,
- "Can not configure WOWLAN in disconnected state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Can not configure WOWLAN in disconnected state\n");
return 0;
}
ret = mwifiex_set_mef_filter(priv, wowlan);
if (ret) {
- dev_err(adapter->dev, "Failed to set MEF filter\n");
+ mwifiex_dbg(adapter, ERROR, "Failed to set MEF filter\n");
return ret;
}
@@ -2981,7 +3029,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
MWIFIEX_SYNC_CMD, &hs_cfg);
if (ret) {
- dev_err(adapter->dev, "Failed to set HS params\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to set HS params\n");
return ret;
}
}
@@ -3041,7 +3090,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
if (!mwifiex_is_pattern_supported(&crule->patterns[i],
byte_seq,
MWIFIEX_COALESCE_MAX_BYTESEQ)) {
- dev_err(priv->adapter->dev, "Pattern not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Pattern not supported\n");
return -EOPNOTSUPP;
}
@@ -3050,8 +3100,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq);
if (pkt_type && mrule->pkt_type) {
- dev_err(priv->adapter->dev,
- "Multiple packet types not allowed\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Multiple packet types not allowed\n");
return -EOPNOTSUPP;
} else if (pkt_type) {
mrule->pkt_type = pkt_type;
@@ -3074,8 +3124,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
}
if (!mrule->pkt_type) {
- dev_err(priv->adapter->dev,
- "Packet type can not be determined\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Packet type can not be determined\n");
return -EOPNOTSUPP;
}
@@ -3093,8 +3143,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
memset(&coalesce_cfg, 0, sizeof(coalesce_cfg));
if (!coalesce) {
- dev_dbg(adapter->dev,
- "Disable coalesce and reset all previous rules\n");
+ mwifiex_dbg(adapter, WARN,
+ "Disable coalesce and reset all previous rules\n");
return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
HostCmd_ACT_GEN_SET, 0,
&coalesce_cfg, true);
@@ -3105,8 +3155,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i],
&coalesce_cfg.rule[i]);
if (ret) {
- dev_err(priv->adapter->dev,
- "Recheck the patterns provided for rule %d\n",
+ mwifiex_dbg(adapter, ERROR,
+ "Recheck the patterns provided for rule %d\n",
i + 1);
return ret;
}
@@ -3138,9 +3188,9 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
- dev_dbg(priv->adapter->dev,
- "Send TDLS Setup Request to %pM status_code=%d\n", peer,
- status_code);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Setup Request to %pM status_code=%d\n",
+ peer, status_code);
mwifiex_add_auto_tdls_peer(priv, peer);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
@@ -3148,45 +3198,45 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
break;
case WLAN_TDLS_SETUP_RESPONSE:
mwifiex_add_auto_tdls_peer(priv, peer);
- dev_dbg(priv->adapter->dev,
- "Send TDLS Setup Response to %pM status_code=%d\n",
- peer, status_code);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Setup Response to %pM status_code=%d\n",
+ peer, status_code);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_SETUP_CONFIRM:
- dev_dbg(priv->adapter->dev,
- "Send TDLS Confirm to %pM status_code=%d\n", peer,
- status_code);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Confirm to %pM status_code=%d\n", peer,
+ status_code);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_TEARDOWN:
- dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
- peer);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Tear down to %pM\n", peer);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_DISCOVERY_REQUEST:
- dev_dbg(priv->adapter->dev,
- "Send TDLS Discovery Request to %pM\n", peer);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Discovery Request to %pM\n", peer);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
- dev_dbg(priv->adapter->dev,
- "Send TDLS Discovery Response to %pM\n", peer);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Discovery Response to %pM\n", peer);
ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
default:
- dev_warn(priv->adapter->dev,
- "Unknown TDLS mgmt/action frame %pM\n", peer);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Unknown TDLS mgmt/action frame %pM\n", peer);
ret = -EINVAL;
break;
}
@@ -3208,8 +3258,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
return -ENOTSUPP;
- dev_dbg(priv->adapter->dev,
- "TDLS peer=%pM, oper=%d\n", peer, action);
+ mwifiex_dbg(priv->adapter, MSG,
+ "TDLS peer=%pM, oper=%d\n", peer, action);
switch (action) {
case NL80211_TDLS_ENABLE_LINK:
@@ -3220,22 +3270,22 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
break;
case NL80211_TDLS_TEARDOWN:
/* shouldn't happen!*/
- dev_warn(priv->adapter->dev,
- "tdls_oper: teardown from driver not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls_oper: teardown from driver not supported\n");
return -EINVAL;
case NL80211_TDLS_SETUP:
/* shouldn't happen!*/
- dev_warn(priv->adapter->dev,
- "tdls_oper: setup from driver not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls_oper: setup from driver not supported\n");
return -EINVAL;
case NL80211_TDLS_DISCOVERY_REQ:
/* shouldn't happen!*/
- dev_warn(priv->adapter->dev,
- "tdls_oper: discovery from driver not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls_oper: discovery from driver not supported\n");
return -EINVAL;
default:
- dev_err(priv->adapter->dev,
- "tdls_oper: operation not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls_oper: operation not supported\n");
return -ENOTSUPP;
}
@@ -3268,8 +3318,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (priv->adapter->scan_processing) {
- dev_err(priv->adapter->dev,
- "radar detection: scan in process...\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "radar detection: scan in process...\n");
return -EBUSY;
}
@@ -3284,8 +3334,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
params->beacon_csa.tail,
params->beacon_csa.tail_len);
if (!chsw_ie) {
- dev_err(priv->adapter->dev,
- "Could not parse channel switch announcement IE\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Could not parse channel switch announcement IE\n");
return -EINVAL;
}
@@ -3297,10 +3347,12 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
}
if (mwifiex_del_mgmt_ies(priv))
- wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to delete mgmt IEs!\n");
if (mwifiex_set_mgmt_ies(priv, &params->beacon_csa)) {
- wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: setting mgmt ies failed\n", __func__);
return -EFAULT;
}
@@ -3324,16 +3376,17 @@ mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
struct mwifiex_radar_params radar_params;
if (priv->adapter->scan_processing) {
- dev_err(priv->adapter->dev,
- "radar detection: scan already in process...\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "radar detection: scan already in process...\n");
return -EBUSY;
}
if (!mwifiex_is_11h_active(priv)) {
- dev_dbg(priv->adapter->dev, "Enable 11h extensions in FW\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "Enable 11h extensions in FW\n");
if (mwifiex_11h_activate(priv, true)) {
- dev_err(priv->adapter->dev,
- "Failed to activate 11h extensions!!");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to activate 11h extensions!!");
return -1;
}
priv->state_11h.is_11h_active = true;
@@ -3492,7 +3545,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy = wiphy_new(&mwifiex_cfg80211_ops,
sizeof(struct mwifiex_adapter *));
if (!wiphy) {
- dev_err(adapter->dev, "%s: creating new wiphy\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: creating new wiphy\n", __func__);
return -ENOMEM;
}
wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
@@ -3563,20 +3617,22 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
ret = wiphy_register(wiphy);
if (ret < 0) {
- dev_err(adapter->dev,
- "%s: wiphy_register failed: %d\n", __func__, ret);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: wiphy_register failed: %d\n", __func__, ret);
wiphy_free(wiphy);
return ret;
}
if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
- wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2);
+ mwifiex_dbg(adapter, INFO,
+ "driver hint alpha2: %2.2s\n", reg_alpha2);
regulatory_hint(wiphy, reg_alpha2);
} else {
country_code = mwifiex_11d_code_2_region(adapter->region_code);
if (country_code)
- wiphy_info(wiphy, "ignoring F/W country code %2.2s\n",
- country_code);
+ mwifiex_dbg(adapter, WARN,
+ "ignoring F/W country code %2.2s\n",
+ country_code);
}
mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
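The ERROR, WARN, MSG, INFO, CMD, EVENT and DUMP tags used throughout the hunks above are bit flags in that per-adapter mask, with separate *_D bits gating verbose buffer dumps. Roughly (bit values here are illustrative and trimmed to the levels this patch uses):

	enum MWIFIEX_DEBUG_LEVEL {
		MWIFIEX_DBG_MSG		= 0x00000001,
		MWIFIEX_DBG_FATAL	= 0x00000002,
		MWIFIEX_DBG_ERROR	= 0x00000004,
		MWIFIEX_DBG_CMD		= 0x00000010,
		MWIFIEX_DBG_EVENT	= 0x00000020,
		/* *_D bits enable raw hex dumps of the matching buffers */
		MWIFIEX_DBG_CMD_D	= 0x00020000,
		MWIFIEX_DBG_EVT_D	= 0x00040000,
		MWIFIEX_DBG_WARN	= 0x08000000,
		MWIFIEX_DBG_INFO	= 0x10000000,
		MWIFIEX_DBG_DUMP	= 0x20000000,
	};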
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index e9df8826f124..3ddb8ec676ed 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -327,8 +327,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
if (!sband) {
- dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n",
- __func__, band);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: cannot find cfp by band %d\n",
+ __func__, band);
return cfp;
}
@@ -349,9 +350,10 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
}
}
if (i == sband->n_channels) {
- dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
- " & channel=%d freq=%d\n", __func__, band, channel,
- freq);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: cannot find cfp by band %d\t"
+ "& channel=%d freq=%d\n",
+ __func__, band, channel, freq);
} else {
if (!ch)
return cfp;
@@ -431,15 +433,17 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
switch (adapter->config_bands) {
case BAND_B:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_b\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_b\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_b,
sizeof(supported_rates_b));
break;
case BAND_G:
case BAND_G | BAND_GN:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_g\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_g\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_g,
sizeof(supported_rates_g));
break;
@@ -449,15 +453,17 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
case BAND_B | BAND_G | BAND_GN:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_bg\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_bg\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_bg,
sizeof(supported_rates_bg));
break;
case BAND_A:
case BAND_A | BAND_G:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_a\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_a\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_a,
sizeof(supported_rates_a));
break;
@@ -466,14 +472,16 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
case BAND_A | BAND_AN | BAND_AAC:
case BAND_A | BAND_G | BAND_AN | BAND_GN:
case BAND_A | BAND_G | BAND_AN | BAND_GN | BAND_AAC:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_a\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_a\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_a,
sizeof(supported_rates_a));
break;
case BAND_GN:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_n\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_n\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_n,
sizeof(supported_rates_n));
break;
@@ -482,25 +490,25 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
/* Ad-hoc mode */
switch (adapter->adhoc_start_band) {
case BAND_B:
- dev_dbg(adapter->dev, "info: adhoc B\n");
+ mwifiex_dbg(adapter, INFO, "info: adhoc B\n");
k = mwifiex_copy_rates(rates, k, adhoc_rates_b,
sizeof(adhoc_rates_b));
break;
case BAND_G:
case BAND_G | BAND_GN:
- dev_dbg(adapter->dev, "info: adhoc G only\n");
+ mwifiex_dbg(adapter, INFO, "info: adhoc G only\n");
k = mwifiex_copy_rates(rates, k, adhoc_rates_g,
sizeof(adhoc_rates_g));
break;
case BAND_B | BAND_G:
case BAND_B | BAND_G | BAND_GN:
- dev_dbg(adapter->dev, "info: adhoc BG\n");
+ mwifiex_dbg(adapter, INFO, "info: adhoc BG\n");
k = mwifiex_copy_rates(rates, k, adhoc_rates_bg,
sizeof(adhoc_rates_bg));
break;
case BAND_A:
case BAND_A | BAND_AN:
- dev_dbg(adapter->dev, "info: adhoc A\n");
+ mwifiex_dbg(adapter, INFO, "info: adhoc A\n");
k = mwifiex_copy_rates(rates, k, adhoc_rates_a,
sizeof(adhoc_rates_a));
break;
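Besides the print conversions, the cmdevt.c hunks below add mwifiex_dbg_dump() calls so command, response and event buffers can be hex-dumped when the matching *_D bit is enabled. A plausible sketch of such a helper built on the kernel's print_hex_dump() (the macro body and the debug_mask field name are assumptions, not taken from this patch):

	#include <linux/printk.h>

	/* Mask-gated hex dump: when the given debug bit is set, print the
	 * header string followed by an offset/hex/ASCII dump of the buffer.
	 */
	#define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len)	\
	do {								\
		if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask)	\
			print_hex_dump(KERN_DEBUG, str,			\
				       DUMP_PREFIX_OFFSET, 16, 1,	\
				       buf, len, false);		\
	} while (0)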
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index c5a14ff7eb82..a1de83fd1dbe 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -62,7 +62,8 @@ mwifiex_get_cmd_node(struct mwifiex_adapter *adapter)
spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
if (list_empty(&adapter->cmd_free_q)) {
- dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n");
+ mwifiex_dbg(adapter, ERROR,
+ "GET_CMD_NODE: cmd node not available\n");
spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
return NULL;
}
@@ -116,7 +117,8 @@ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv,
{
/* Copy the HOST command to command buffer */
memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len);
- dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: host cmd size = %d\n", pcmd_ptr->len);
return 0;
}
@@ -147,8 +149,9 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
/* Sanity test */
if (host_cmd == NULL || host_cmd->size == 0) {
- dev_err(adapter->dev, "DNLD_CMD: host_cmd is null"
- " or cmd size is 0, not sending\n");
+ mwifiex_dbg(adapter, ERROR,
+ "DNLD_CMD: host_cmd is null\t"
+ "or cmd size is 0, not sending\n");
if (cmd_node->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, cmd_node);
@@ -161,8 +164,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
cmd_code != HostCmd_CMD_FUNC_INIT) {
- dev_err(adapter->dev,
- "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
+ mwifiex_dbg(adapter, ERROR,
+ "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
cmd_code);
if (cmd_node->wait_q_enabled)
mwifiex_complete_cmd(adapter, cmd_node);
@@ -197,10 +200,12 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
*/
skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len);
- dev_dbg(adapter->dev,
- "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code,
- le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
- le16_to_cpu(host_cmd->seq_num));
+ mwifiex_dbg(adapter, CMD,
+ "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
+ cmd_code,
+ le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)),
+ cmd_size, le16_to_cpu(host_cmd->seq_num));
+ mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size);
if (adapter->iface_type == MWIFIEX_USB) {
tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
@@ -221,7 +226,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
}
if (ret == -1) {
- dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "DNLD_CMD: host to card failed\n");
if (adapter->iface_type == MWIFIEX_USB)
adapter->cmd_sent = false;
if (cmd_node->wait_q_enabled)
@@ -280,12 +286,14 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
(adapter->seq_num, priv->bss_num,
priv->bss_type)));
- dev_dbg(adapter->dev,
- "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
+ mwifiex_dbg(adapter, CMD,
+ "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
le16_to_cpu(sleep_cfm_buf->command),
le16_to_cpu(sleep_cfm_buf->action),
le16_to_cpu(sleep_cfm_buf->size),
le16_to_cpu(sleep_cfm_buf->seq_num));
+ mwifiex_dbg_dump(adapter, CMD_D, "SLEEP_CFM buffer: ", sleep_cfm_buf,
+ le16_to_cpu(sleep_cfm_buf->size));
if (adapter->iface_type == MWIFIEX_USB) {
sleep_cfm_tmp =
@@ -311,7 +319,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
}
if (ret == -1) {
- dev_err(adapter->dev, "SLEEP_CFM: failed\n");
+ mwifiex_dbg(adapter, ERROR, "SLEEP_CFM: failed\n");
adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++;
return -1;
}
@@ -362,8 +370,9 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter)
for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
if (!cmd_array[i].skb) {
- dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n");
- return -1;
+ mwifiex_dbg(adapter, ERROR,
+ "unable to allocate command buffer\n");
+ return -ENOMEM;
}
}
@@ -386,7 +395,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
/* Need to check if cmd pool is allocated or not */
if (!adapter->cmd_pool) {
- dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n");
+ mwifiex_dbg(adapter, FATAL,
+ "info: FREE_CMD_BUF: cmd_pool is null\n");
return 0;
}
@@ -395,7 +405,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
/* Release shared memory buffers */
for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
if (cmd_array[i].skb) {
- dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: free cmd buffer %d\n", i);
dev_kfree_skb_any(cmd_array[i].skb);
}
if (!cmd_array[i].resp_skb)
@@ -409,7 +420,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
}
/* Release struct cmd_ctrl_node */
if (adapter->cmd_pool) {
- dev_dbg(adapter->dev, "cmd: free cmd pool\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: free cmd pool\n");
kfree(adapter->cmd_pool);
adapter->cmd_pool = NULL;
}
@@ -459,7 +471,8 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
rx_info->bss_type = priv->bss_type;
}
- dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause);
+ mwifiex_dbg(adapter, EVENT, "EVENT: cause: %#x\n", eventcause);
+ mwifiex_dbg_dump(adapter, EVT_D, "Event Buf:", skb->data, skb->len);
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
ret = mwifiex_process_uap_event(priv);
@@ -498,28 +511,33 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
}
if (adapter->is_suspended) {
- dev_err(adapter->dev, "PREP_CMD: device in suspended state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: device in suspended state\n");
return -1;
}
if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
- dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: host entering sleep state\n");
return -1;
}
if (adapter->surprise_removed) {
- dev_err(adapter->dev, "PREP_CMD: card is removed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: card is removed\n");
return -1;
}
if (adapter->is_cmd_timedout) {
- dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: FW is in bad state\n");
return -1;
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
if (cmd_no != HostCmd_CMD_FUNC_INIT) {
- dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: FW in reset state\n");
return -1;
}
}
@@ -528,7 +546,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
cmd_node = mwifiex_get_cmd_node(adapter);
if (!cmd_node) {
- dev_err(adapter->dev, "PREP_CMD: no free cmd node\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: no free cmd node\n");
return -1;
}
@@ -536,7 +555,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync);
if (!cmd_node->cmd_skb) {
- dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: no free cmd buf\n");
return -1;
}
@@ -571,7 +591,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
/* Return error, since the command preparation failed */
if (ret) {
- dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n",
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: cmd %#x preparation failed\n",
cmd_no);
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
return -1;
@@ -626,7 +647,8 @@ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
atomic_dec(&adapter->cmd_pending);
- dev_dbg(adapter->dev, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
+ mwifiex_dbg(adapter, CMD,
+ "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
le16_to_cpu(host_cmd->command),
atomic_read(&adapter->cmd_pending));
}
@@ -648,7 +670,7 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
if (!host_cmd) {
- dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n");
+ mwifiex_dbg(adapter, ERROR, "QUEUE_CMD: host_cmd is NULL\n");
return;
}
@@ -673,7 +695,8 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
atomic_inc(&adapter->cmd_pending);
- dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
+ mwifiex_dbg(adapter, CMD,
+ "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
command, atomic_read(&adapter->cmd_pending));
}
@@ -699,7 +722,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
/* Check if already in processing */
if (adapter->curr_cmd) {
- dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n");
+ mwifiex_dbg(adapter, FATAL,
+ "EXEC_NEXT_CMD: cmd in processing\n");
return -1;
}
@@ -721,8 +745,9 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
priv = cmd_node->priv;
if (adapter->ps_state != PS_STATE_AWAKE) {
- dev_err(adapter->dev, "%s: cannot send cmd in sleep state,"
- " this should not happen\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: cannot send cmd in sleep state,\t"
+ "this should not happen\n", __func__);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
return ret;
}
@@ -772,8 +797,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
resp = (struct host_cmd_ds_command *) adapter->upld_buf;
- dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n",
- le16_to_cpu(resp->command));
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: NULL curr_cmd, %#x\n",
+ le16_to_cpu(resp->command));
return -1;
}
@@ -781,8 +807,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
- dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n",
- le16_to_cpu(resp->command));
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: %#x been canceled\n",
+ le16_to_cpu(resp->command));
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd = NULL;
@@ -794,7 +821,8 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
/* Copy original response back to response buffer */
struct mwifiex_ds_misc_cmd *hostcmd;
uint16_t size = le16_to_cpu(resp->size);
- dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size);
+ mwifiex_dbg(adapter, INFO,
+ "info: host cmd resp size = %d\n", size);
size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER);
if (adapter->curr_cmd->data_buf) {
hostcmd = adapter->curr_cmd->data_buf;
@@ -822,13 +850,15 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
orig_cmdresp_no;
- dev_dbg(adapter->dev,
- "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
- orig_cmdresp_no, cmdresp_result,
- le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
+ mwifiex_dbg(adapter, CMD,
+ "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+ orig_cmdresp_no, cmdresp_result,
+ le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
+ mwifiex_dbg_dump(adapter, CMD_D, "CMD_RESP buffer:", resp,
+ le16_to_cpu(resp->size));
if (!(orig_cmdresp_no & HostCmd_RET_BIT)) {
- dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n");
+ mwifiex_dbg(adapter, ERROR, "CMD_RESP: invalid cmd resp\n");
if (adapter->curr_cmd->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
@@ -852,8 +882,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
/* Check init command response */
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
if (ret) {
- dev_err(adapter->dev, "%s: cmd %#x failed during "
- "initialization\n", __func__, cmdresp_no);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: cmd %#x failed during\t"
+ "initialization\n", __func__, cmdresp_no);
mwifiex_init_fw_complete(adapter);
return -1;
} else if (adapter->last_init_cmd == cmdresp_no)
@@ -888,7 +919,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->is_cmd_timedout = 1;
if (!adapter->curr_cmd) {
- dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cmd: empty curr_cmd\n");
return;
}
cmd_node = adapter->curr_cmd;
@@ -897,47 +929,60 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
adapter->dbg.timeout_cmd_act =
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
- dev_err(adapter->dev,
- "%s: Timeout cmd id = %#x, act = %#x\n", __func__,
- adapter->dbg.timeout_cmd_id,
- adapter->dbg.timeout_cmd_act);
-
- dev_err(adapter->dev, "num_data_h2c_failure = %d\n",
- adapter->dbg.num_tx_host_to_card_failure);
- dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
- adapter->dbg.num_cmd_host_to_card_failure);
-
- dev_err(adapter->dev, "is_cmd_timedout = %d\n",
- adapter->is_cmd_timedout);
- dev_err(adapter->dev, "num_tx_timeout = %d\n",
- adapter->dbg.num_tx_timeout);
-
- dev_err(adapter->dev, "last_cmd_index = %d\n",
- adapter->dbg.last_cmd_index);
- dev_err(adapter->dev, "last_cmd_id: %*ph\n",
- (int)sizeof(adapter->dbg.last_cmd_id),
- adapter->dbg.last_cmd_id);
- dev_err(adapter->dev, "last_cmd_act: %*ph\n",
- (int)sizeof(adapter->dbg.last_cmd_act),
- adapter->dbg.last_cmd_act);
-
- dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
- adapter->dbg.last_cmd_resp_index);
- dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n",
- (int)sizeof(adapter->dbg.last_cmd_resp_id),
- adapter->dbg.last_cmd_resp_id);
-
- dev_err(adapter->dev, "last_event_index = %d\n",
- adapter->dbg.last_event_index);
- dev_err(adapter->dev, "last_event: %*ph\n",
- (int)sizeof(adapter->dbg.last_event),
- adapter->dbg.last_event);
-
- dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
- adapter->data_sent, adapter->cmd_sent);
-
- dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
- adapter->ps_mode, adapter->ps_state);
+ mwifiex_dbg(adapter, MSG,
+ "%s: Timeout cmd id = %#x, act = %#x\n", __func__,
+ adapter->dbg.timeout_cmd_id,
+ adapter->dbg.timeout_cmd_act);
+
+ mwifiex_dbg(adapter, MSG,
+ "num_data_h2c_failure = %d\n",
+ adapter->dbg.num_tx_host_to_card_failure);
+ mwifiex_dbg(adapter, MSG,
+ "num_cmd_h2c_failure = %d\n",
+ adapter->dbg.num_cmd_host_to_card_failure);
+
+ mwifiex_dbg(adapter, MSG,
+ "is_cmd_timedout = %d\n",
+ adapter->is_cmd_timedout);
+ mwifiex_dbg(adapter, MSG,
+ "num_tx_timeout = %d\n",
+ adapter->dbg.num_tx_timeout);
+
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_index = %d\n",
+ adapter->dbg.last_cmd_index);
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_id: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_id),
+ adapter->dbg.last_cmd_id);
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_act: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_act),
+ adapter->dbg.last_cmd_act);
+
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_resp_index = %d\n",
+ adapter->dbg.last_cmd_resp_index);
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_resp_id: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_resp_id),
+ adapter->dbg.last_cmd_resp_id);
+
+ mwifiex_dbg(adapter, MSG,
+ "last_event_index = %d\n",
+ adapter->dbg.last_event_index);
+ mwifiex_dbg(adapter, MSG,
+ "last_event: %*ph\n",
+ (int)sizeof(adapter->dbg.last_event),
+ adapter->dbg.last_event);
+
+ mwifiex_dbg(adapter, MSG,
+ "data_sent=%d cmd_sent=%d\n",
+ adapter->data_sent, adapter->cmd_sent);
+
+ mwifiex_dbg(adapter, MSG,
+ "ps_mode=%d ps_state=%d\n",
+ adapter->ps_mode, adapter->ps_state);
if (cmd_node->wait_q_enabled) {
adapter->cmd_wait_q.status = -ETIMEDOUT;
@@ -948,8 +993,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
mwifiex_init_fw_complete(adapter);
- if (adapter->if_ops.fw_dump)
- adapter->if_ops.fw_dump(adapter);
+ if (adapter->if_ops.device_dump)
+ adapter->if_ops.device_dump(adapter);
if (adapter->if_ops.card_reset)
adapter->if_ops.card_reset(adapter);
@@ -1015,7 +1060,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
if (!priv)
continue;
if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: aborting scan\n");
+ mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
cfg80211_scan_done(priv->scan_request, 1);
priv->scan_request = NULL;
}
@@ -1075,7 +1120,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
if (!priv)
continue;
if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: aborting scan\n");
+ mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
cfg80211_scan_done(priv->scan_request, 1);
priv->scan_request = NULL;
}
@@ -1100,11 +1145,11 @@ mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
!adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
mwifiex_dnld_sleep_confirm_cmd(adapter);
else
- dev_dbg(adapter->dev,
- "cmd: Delay Sleep Confirm (%s%s%s)\n",
- (adapter->cmd_sent) ? "D" : "",
- (adapter->curr_cmd) ? "C" : "",
- (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Delay Sleep Confirm (%s%s%s)\n",
+ (adapter->cmd_sent) ? "D" : "",
+ (adapter->curr_cmd) ? "C" : "",
+ (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
}
/*
@@ -1120,15 +1165,18 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
priv->adapter->hs_activated = true;
mwifiex_update_rxreor_flags(priv->adapter,
RXREOR_FORCE_NO_DROP);
- dev_dbg(priv->adapter->dev, "event: hs_activated\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: hs_activated\n");
priv->adapter->hs_activate_wait_q_woken = true;
wake_up_interruptible(
&priv->adapter->hs_activate_wait_q);
} else {
- dev_dbg(priv->adapter->dev, "event: HS not configured\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: HS not configured\n");
}
} else {
- dev_dbg(priv->adapter->dev, "event: hs_deactivated\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: hs_deactivated\n");
priv->adapter->hs_activated = false;
}
}
@@ -1156,11 +1204,12 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
mwifiex_hs_activated_event(priv, true);
return 0;
} else {
- dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply"
- " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
- resp->result, conditions,
- phs_cfg->params.hs_config.gpio,
- phs_cfg->params.hs_config.gap);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: CMD_RESP: HS_CFG cmd reply\t"
+ " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
+ resp->result, conditions,
+ phs_cfg->params.hs_config.gpio,
+ phs_cfg->params.hs_config.gap);
}
if (conditions != HS_CFG_CANCEL) {
adapter->is_hs_configured = true;
@@ -1182,8 +1231,10 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
void
mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
{
- dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep"
- " since there is interrupt from the firmware\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: auto cancelling host sleep\t"
+ "since there is interrupt from the firmware\n",
+ __func__);
adapter->if_ops.wakeup(adapter);
adapter->hs_activated = false;
@@ -1212,13 +1263,14 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
uint16_t seq_num = le16_to_cpu(cmd->seq_num);
if (!upld_len) {
- dev_err(adapter->dev, "%s: cmd size is 0\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: cmd size is 0\n", __func__);
return;
}
- dev_dbg(adapter->dev,
- "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
- command, result, le16_to_cpu(cmd->size), seq_num);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+ command, result, le16_to_cpu(cmd->size), seq_num);
/* Get BSS number and corresponding priv */
priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
@@ -1232,15 +1284,16 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
command &= HostCmd_CMD_ID_MASK;
if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
- dev_err(adapter->dev,
- "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
- __func__, command, result);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
+ __func__, command, result);
return;
}
if (result) {
- dev_err(adapter->dev, "%s: sleep confirm cmd failed\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: sleep confirm cmd failed\n",
+ __func__);
adapter->pm_wakeup_card_req = false;
adapter->ps_state = PS_STATE_AWAKE;
return;
@@ -1305,7 +1358,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_types_header));
cmd_size += sizeof(*ps_tlv);
tlv += sizeof(*ps_tlv);
- dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n");
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: PS Command: Enter PS\n");
ps_mode->null_pkt_interval =
cpu_to_le16(adapter->null_pkt_interval);
ps_mode->multiple_dtims =
@@ -1335,8 +1389,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
tlv += sizeof(*auto_ds_tlv);
if (auto_ds)
idletime = auto_ds->idle_time;
- dev_dbg(priv->adapter->dev,
- "cmd: PS Command: Enter Auto Deep Sleep\n");
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: PS Command: Enter Auto Deep Sleep\n");
auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
}
cmd->size = cpu_to_le16(cmd_size);
@@ -1363,27 +1417,31 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
uint16_t auto_ps_bitmap =
le16_to_cpu(ps_mode->params.ps_bitmap);
- dev_dbg(adapter->dev,
- "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
- __func__, resp->result, action);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
+ __func__, resp->result, action);
if (action == EN_AUTO_PS) {
if (auto_ps_bitmap & BITMAP_AUTO_DS) {
- dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Enabled auto deep sleep\n");
priv->adapter->is_deep_sleep = true;
}
if (auto_ps_bitmap & BITMAP_STA_PS) {
- dev_dbg(adapter->dev, "cmd: Enabled STA power save\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Enabled STA power save\n");
if (adapter->sleep_period.period)
- dev_dbg(adapter->dev,
- "cmd: set to uapsd/pps mode\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: set to uapsd/pps mode\n");
}
} else if (action == DIS_AUTO_PS) {
if (ps_bitmap & BITMAP_AUTO_DS) {
priv->adapter->is_deep_sleep = false;
- dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Disabled auto deep sleep\n");
}
if (ps_bitmap & BITMAP_STA_PS) {
- dev_dbg(adapter->dev, "cmd: Disabled STA power save\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Disabled STA power save\n");
if (adapter->sleep_period.period) {
adapter->delay_null_pkt = false;
adapter->tx_lock_flag = false;
@@ -1396,7 +1454,8 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
else
adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
- dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: ps_bitmap=%#x\n", ps_bitmap);
if (pm_cfg) {
/* This section is for get power save mode */
@@ -1533,29 +1592,29 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
api_rev->major_ver;
adapter->key_api_minor_ver =
api_rev->minor_ver;
- dev_dbg(adapter->dev,
- "key_api v%d.%d\n",
- adapter->key_api_major_ver,
- adapter->key_api_minor_ver);
+ mwifiex_dbg(adapter, INFO,
+ "key_api v%d.%d\n",
+ adapter->key_api_major_ver,
+ adapter->key_api_minor_ver);
break;
case FW_API_VER_ID:
adapter->fw_api_ver =
api_rev->major_ver;
- dev_dbg(adapter->dev,
- "Firmware api version %d\n",
- adapter->fw_api_ver);
+ mwifiex_dbg(adapter, INFO,
+ "Firmware api version %d\n",
+ adapter->fw_api_ver);
break;
default:
- dev_warn(adapter->dev,
- "Unknown api_id: %d\n",
- api_id);
+ mwifiex_dbg(adapter, FATAL,
+ "Unknown api_id: %d\n",
+ api_id);
break;
}
break;
default:
- dev_warn(adapter->dev,
- "Unknown GET_HW_SPEC TLV type: %#x\n",
- le16_to_cpu(tlv->type));
+ mwifiex_dbg(adapter, FATAL,
+ "Unknown GET_HW_SPEC TLV type: %#x\n",
+ le16_to_cpu(tlv->type));
break;
}
parsed_len += le16_to_cpu(tlv->len) +
@@ -1565,14 +1624,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
}
}
- dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
- adapter->fw_release_number);
- dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
- hw_spec->permanent_addr);
- dev_dbg(adapter->dev,
- "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
- le16_to_cpu(hw_spec->hw_if_version),
- le16_to_cpu(hw_spec->version));
+ mwifiex_dbg(adapter, INFO,
+ "info: GET_HW_SPEC: fw_release_number- %#x\n",
+ adapter->fw_release_number);
+ mwifiex_dbg(adapter, INFO,
+ "info: GET_HW_SPEC: permanent addr: %pM\n",
+ hw_spec->permanent_addr);
+ mwifiex_dbg(adapter, INFO,
+ "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
+ le16_to_cpu(hw_spec->hw_if_version),
+ le16_to_cpu(hw_spec->version));
ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr);
adapter->region_code = le16_to_cpu(hw_spec->region_code);
@@ -1585,8 +1646,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
/* If it's unidentified region code, use the default (USA) */
if (i >= MWIFIEX_MAX_REGION_CODE) {
adapter->region_code = 0x10;
- dev_dbg(adapter->dev,
- "cmd: unknown region code, use default (USA)\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: unknown region code, use default (USA)\n");
}
adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 1fb329dc6744..5a0636d43a1b 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -152,24 +152,24 @@ free_and_exit:
}
/*
- * Proc firmware dump read handler.
+ * Proc device dump read handler.
*
- * This function is called when the 'fw_dump' file is opened for
+ * This function is called when the 'device_dump' file is opened for
* reading.
- * This function dumps firmware memory in different files
- * (ex. DTCM, ITCM, SQRAM etc.) based on the the segments for
+ * This function dumps driver information and firmware memory segments
+ * (ex. DTCM, ITCM, SQRAM etc.) for
* debugging.
*/
static ssize_t
-mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
- size_t count, loff_t *ppos)
+mwifiex_device_dump_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
{
struct mwifiex_private *priv = file->private_data;
- if (!priv->adapter->if_ops.fw_dump)
+ if (!priv->adapter->if_ops.device_dump)
return -EIO;
- priv->adapter->if_ops.fw_dump(priv->adapter);
+ priv->adapter->if_ops.device_dump(priv->adapter);
return 0;
}
@@ -535,6 +535,144 @@ done:
return ret;
}
+/* Proc debug_mask file read handler.
+ * This function is called when the 'debug_mask' file is opened for reading.
+ * This function can be used to read the driver debugging mask value.
+ */
+static ssize_t
+mwifiex_debug_mask_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv =
+ (struct mwifiex_private *)file->private_data;
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+ char *buf = (char *)page;
+	ssize_t ret = 0;
+ int pos = 0;
+
+ if (!buf)
+ return -ENOMEM;
+
+ pos += snprintf(buf, PAGE_SIZE, "debug mask=0x%08x\n",
+ priv->adapter->debug_mask);
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+
+ free_page(page);
+ return ret;
+}
+
+/* Proc debug_mask file write handler.
+ * This function is called when the 'debug_mask' file is opened for writing.
+ * This function can be used to set the driver debugging mask value.
+ */
+static ssize_t
+mwifiex_debug_mask_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ unsigned long debug_mask;
+ struct mwifiex_private *priv = (void *)file->private_data;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ char *buf = (void *)addr;
+ size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1));
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, ubuf, buf_size)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (kstrtoul(buf, 0, &debug_mask)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ priv->adapter->debug_mask = debug_mask;
+ ret = count;
+done:
+ free_page(addr);
+ return ret;
+}
+
+/* Proc memrw file write handler.
+ * This function is called when the 'memrw' file is opened for writing.
+ * This function can be used to write to a memory location.
+ */
+static ssize_t
+mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int ret;
+ char cmd;
+ struct mwifiex_ds_mem_rw mem_rw;
+ u16 cmd_action;
+ struct mwifiex_private *priv = (void *)file->private_data;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ char *buf = (void *)addr;
+ size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1));
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, ubuf, buf_size)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ret = sscanf(buf, "%c %x %x", &cmd, &mem_rw.addr, &mem_rw.value);
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((cmd == 'r') || (cmd == 'R')) {
+ cmd_action = HostCmd_ACT_GEN_GET;
+ mem_rw.value = 0;
+ } else if ((cmd == 'w') || (cmd == 'W')) {
+ cmd_action = HostCmd_ACT_GEN_SET;
+ } else {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ memcpy(&priv->mem_rw, &mem_rw, sizeof(mem_rw));
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MEM_ACCESS, cmd_action, 0,
+ &mem_rw, true))
+ ret = -1;
+ else
+ ret = count;
+
+done:
+ free_page(addr);
+ return ret;
+}
+
+/* Proc memrw file read handler.
+ * This function is called when the 'memrw' file is opened for reading.
+ * This function can be used to read from a memory location.
+ */
+static ssize_t
+mwifiex_memrw_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv = (void *)file->private_data;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ char *buf = (char *)addr;
+ int ret, pos = 0;
+
+ if (!buf)
+ return -ENOMEM;
+
+ pos += snprintf(buf, PAGE_SIZE, "0x%x 0x%x\n", priv->mem_rw.addr,
+ priv->mem_rw.value);
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+
+ free_page(addr);
+ return ret;
+}
+
static u32 saved_offset = -1, saved_bytes = -1;
/*
@@ -654,7 +792,8 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
if (arg_num > 3) {
- dev_err(priv->adapter->dev, "Too many arguments\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Too many arguments\n");
ret = -EINVAL;
goto done;
}
@@ -746,11 +885,13 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \
MWIFIEX_DFS_FILE_READ_OPS(info);
MWIFIEX_DFS_FILE_READ_OPS(debug);
MWIFIEX_DFS_FILE_READ_OPS(getlog);
-MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
+MWIFIEX_DFS_FILE_READ_OPS(device_dump);
MWIFIEX_DFS_FILE_OPS(regrdwr);
MWIFIEX_DFS_FILE_OPS(rdeeprom);
+MWIFIEX_DFS_FILE_OPS(memrw);
MWIFIEX_DFS_FILE_OPS(hscfg);
MWIFIEX_DFS_FILE_OPS(histogram);
+MWIFIEX_DFS_FILE_OPS(debug_mask);
/*
* This function creates the debug FS directory structure and the files.
@@ -772,9 +913,11 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
MWIFIEX_DFS_ADD_FILE(getlog);
MWIFIEX_DFS_ADD_FILE(regrdwr);
MWIFIEX_DFS_ADD_FILE(rdeeprom);
- MWIFIEX_DFS_ADD_FILE(fw_dump);
+ MWIFIEX_DFS_ADD_FILE(device_dump);
+ MWIFIEX_DFS_ADD_FILE(memrw);
MWIFIEX_DFS_ADD_FILE(hscfg);
MWIFIEX_DFS_ADD_FILE(histogram);
+ MWIFIEX_DFS_ADD_FILE(debug_mask);
}
/*
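For reference, a minimal userspace sketch of driving the two debugfs files added above: debug_mask takes any kstrtoul()-compatible mask, and memrw takes a command character, an address and a value, matching the sscanf("%c %x %x") parser in mwifiex_memrw_write(). The debugfs mount point, the mlan0 interface name and the 0x8000a000 address are all illustrative assumptions.

	/* Usage sketch for the debug_mask and memrw files added above.
	 * Paths and the target address are illustrative assumptions.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int write_str(const char *path, const char *s)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		write(fd, s, strlen(s));
		close(fd);
		return 0;
	}

	int main(void)
	{
		char buf[64] = { 0 };
		int fd;

		/* Enable every debug level (MWIFIEX_DBG_ANY). */
		write_str("/sys/kernel/debug/mwifiex/mlan0/debug_mask",
			  "0xffffffff");

		/* Queue a read of one word of device memory... */
		write_str("/sys/kernel/debug/mwifiex/mlan0/memrw",
			  "r 0x8000a000 0");

		/* ...then fetch the address/value pair the firmware reported. */
		fd = open("/sys/kernel/debug/mwifiex/mlan0/memrw", O_RDONLY);
		if (fd < 0)
			return 1;
		if (read(fd, buf, sizeof(buf) - 1) > 0)
			printf("memrw: %s", buf);
		close(fd);
		return 0;
	}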
diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c
index 65d8d6d4b6ba..58400c69ab26 100644
--- a/drivers/net/wireless/mwifiex/ethtool.c
+++ b/drivers/net/wireless/mwifiex/ethtool.c
@@ -64,104 +64,7 @@ static int mwifiex_ethtool_set_wol(struct net_device *dev,
return 0;
}
-static int
-mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
-{
- struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- struct mwifiex_adapter *adapter = priv->adapter;
- struct memory_type_mapping *entry;
-
- if (!adapter->if_ops.fw_dump)
- return -ENOTSUPP;
-
- dump->flag = adapter->curr_mem_idx;
- dump->version = 1;
- if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
- dump->len = adapter->drv_info_size;
- } else if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) {
- entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
- dump->len = entry->mem_size;
- } else {
- dump->len = 0;
- }
-
- return 0;
-}
-
-static int
-mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
- void *buffer)
-{
- u8 *p = buffer;
- struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- struct mwifiex_adapter *adapter = priv->adapter;
- struct memory_type_mapping *entry;
-
- if (!adapter->if_ops.fw_dump)
- return -ENOTSUPP;
-
- if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
- if (!adapter->drv_info_dump)
- return -EFAULT;
- memcpy(p, adapter->drv_info_dump, adapter->drv_info_size);
- return 0;
- }
-
- if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
- dev_err(adapter->dev, "firmware dump in progress!!\n");
- return -EBUSY;
- }
-
- entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
-
- if (!entry->mem_ptr)
- return -EFAULT;
-
- memcpy(p, entry->mem_ptr, entry->mem_size);
-
- entry->mem_size = 0;
- vfree(entry->mem_ptr);
- entry->mem_ptr = NULL;
-
- return 0;
-}
-
-static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val)
-{
- struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- struct mwifiex_adapter *adapter = priv->adapter;
-
- if (!adapter->if_ops.fw_dump)
- return -ENOTSUPP;
-
- if (val->flag == MWIFIEX_DRV_INFO_IDX) {
- adapter->curr_mem_idx = MWIFIEX_DRV_INFO_IDX;
- return 0;
- }
-
- if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
- dev_err(adapter->dev, "firmware dump in progress!!\n");
- return -EBUSY;
- }
-
- if (val->flag == MWIFIEX_FW_DUMP_IDX) {
- adapter->curr_mem_idx = val->flag;
- adapter->if_ops.fw_dump(adapter);
- return 0;
- }
-
- if (val->flag < 0 || val->flag >= adapter->num_mem_types)
- return -EINVAL;
-
- adapter->curr_mem_idx = val->flag;
-
- return 0;
-}
-
const struct ethtool_ops mwifiex_ethtool_ops = {
.get_wol = mwifiex_ethtool_get_wol,
.set_wol = mwifiex_ethtool_set_wol,
- .get_dump_flag = mwifiex_get_dump_flag,
- .get_dump_data = mwifiex_get_dump_data,
- .set_dump = mwifiex_set_dump,
};
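With the ethtool get_dump_flag/get_dump_data/set_dump hooks removed, dumps are delivered through the kernel's devcoredump class instead (see mwifiex_upload_device_dump() in main.c further down). A hedged retrieval sketch follows; the devcd1 index is an assumption, since the kernel numbers coredump instances dynamically.

	/* Sketch: copy a devcoredump instance to a file. Real code would
	 * scan /sys/class/devcoredump/ for the devcdN directory bound to
	 * the WLAN device instead of hard-coding devcd1.
	 */
	#include <stdio.h>

	int main(void)
	{
		FILE *in = fopen("/sys/class/devcoredump/devcd1/data", "rb");
		FILE *out = fopen("mwifiex_device_dump.bin", "wb");
		char buf[4096];
		size_t n;

		if (!in || !out)
			return 1;
		while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
			fwrite(buf, 1, n, out);
		fclose(in);
		fclose(out);
		return 0;
	}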
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 59d8964dd0dc..c404390cb0fa 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -323,6 +323,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075
#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
+#define HostCmd_CMD_MEM_ACCESS 0x0086
#define HostCmd_CMD_CFG_DATA 0x008f
#define HostCmd_CMD_VERSION_EXT 0x0097
#define HostCmd_CMD_MEF_CFG 0x009a
@@ -1576,6 +1577,13 @@ struct mwifiex_ie_types_extcap {
u8 ext_capab[0];
} __packed;
+struct host_cmd_ds_mem_access {
+ __le16 action;
+ __le16 reserved;
+ __le32 addr;
+ __le32 value;
+};
+
struct mwifiex_ie_types_qos_info {
struct mwifiex_ie_types_header header;
u8 qos_info;
@@ -1958,6 +1966,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_p2p_mode_cfg mode_cfg;
struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
struct host_cmd_ds_mef_cfg mef_cfg;
+ struct host_cmd_ds_mem_access mem;
struct host_cmd_ds_mac_reg_access mac_reg;
struct host_cmd_ds_bbp_reg_access bbp_reg;
struct host_cmd_ds_rf_reg_access rf_reg;
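The new host_cmd_ds_mem_access payload is little-endian on the wire, so the command path has to convert each field before handing the buffer to the firmware. A minimal sketch of that preparation step, assuming the driver's usual convention of an S_DS_GEN-byte command header in front of the params union (the handler itself is not part of this hunk):

	/* Sketch: prepare a MEM_ACCESS command from a mwifiex_ds_mem_rw
	 * request filled in by the memrw debugfs handler.
	 */
	static int mwifiex_cmd_mem_access(struct host_cmd_ds_command *cmd,
					  u16 cmd_action, void *data_buf)
	{
		struct mwifiex_ds_mem_rw *mem_rw = data_buf;
		struct host_cmd_ds_mem_access *mem_access = &cmd->params.mem;

		cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS);
		cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mem_access) +
					S_DS_GEN);

		mem_access->action = cpu_to_le16(cmd_action);
		mem_access->addr = cpu_to_le32(mem_rw->addr);
		mem_access->value = cpu_to_le32(mem_rw->value);

		return 0;
	}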
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index e12192f5cfad..df7fdc09d38c 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -56,7 +56,7 @@ static void wakeup_timer_fn(unsigned long data)
{
struct mwifiex_adapter *adapter = (struct mwifiex_adapter *)data;
- dev_err(adapter->dev, "Firmware wakeup failed\n");
+ mwifiex_dbg(adapter, ERROR, "Firmware wakeup failed\n");
adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
mwifiex_cancel_all_pending_cmd(adapter);
@@ -172,8 +172,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
/* Allocate command buffer */
ret = mwifiex_alloc_cmd_buffer(adapter);
if (ret) {
- dev_err(adapter->dev, "%s: failed to alloc cmd buffer\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to alloc cmd buffer\n",
+ __func__);
return -1;
}
@@ -182,8 +183,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
+ INTF_HEADER_LEN);
if (!adapter->sleep_cfm) {
- dev_err(adapter->dev, "%s: failed to alloc sleep cfm"
- " cmd buffer\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to alloc sleep cfm\t"
+ " cmd buffer\n", __func__);
return -1;
}
skb_reserve(adapter->sleep_cfm, INTF_HEADER_LEN);
@@ -417,7 +419,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
mwifiex_free_lock_list(adapter);
/* Free command buffer */
- dev_dbg(adapter->dev, "info: free cmd buffer\n");
+ mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n");
mwifiex_free_cmd_buffer(adapter);
for (idx = 0; idx < adapter->num_mem_types; idx++) {
@@ -433,6 +435,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
if (adapter->drv_info_dump) {
vfree(adapter->drv_info_dump);
+ adapter->drv_info_dump = NULL;
adapter->drv_info_size = 0;
}
@@ -595,10 +598,11 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
for (i = 0; i < adapter->priv_num; ++i) {
head = &adapter->bss_prio_tbl[i].bss_prio_head;
lock = &adapter->bss_prio_tbl[i].bss_prio_lock;
- dev_dbg(adapter->dev, "info: delete BSS priority table,"
- " bss_type = %d, bss_num = %d, i = %d,"
- " head = %p\n",
- priv->bss_type, priv->bss_num, i, head);
+ mwifiex_dbg(adapter, INFO,
+ "info: delete BSS priority table,\t"
+ "bss_type = %d, bss_num = %d, i = %d,\t"
+ "head = %p\n",
+ priv->bss_type, priv->bss_num, i, head);
{
spin_lock_irqsave(lock, flags);
@@ -609,9 +613,10 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
list_for_each_entry_safe(bssprio_node, tmp_node, head,
list) {
if (bssprio_node->priv == priv) {
- dev_dbg(adapter->dev, "info: Delete "
- "node %p, next = %p\n",
- bssprio_node, tmp_node);
+ mwifiex_dbg(adapter, INFO,
+ "info: Delete\t"
+ "node %p, next = %p\n",
+ bssprio_node, tmp_node);
list_del(&bssprio_node->list);
kfree(bssprio_node);
}
@@ -659,20 +664,23 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING;
/* wait for mwifiex_process to complete */
if (adapter->mwifiex_processing) {
- dev_warn(adapter->dev, "main process is still running\n");
+ mwifiex_dbg(adapter, WARN,
+ "main process is still running\n");
return ret;
}
/* cancel current command */
if (adapter->curr_cmd) {
- dev_warn(adapter->dev, "curr_cmd is still in processing\n");
+ mwifiex_dbg(adapter, WARN,
+ "curr_cmd is still in processing\n");
del_timer_sync(&adapter->cmd_timer);
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
adapter->curr_cmd = NULL;
}
/* shut down mwifiex */
- dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
+ mwifiex_dbg(adapter, MSG,
+ "info: shutdown mwifiex...\n");
/* Clean up Tx/Rx queues and delete BSS priority table */
for (i = 0; i < adapter->priv_num; i++) {
@@ -741,8 +749,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
/* check if firmware is already running */
ret = adapter->if_ops.check_fw_status(adapter, poll_num);
if (!ret) {
- dev_notice(adapter->dev,
- "WLAN FW already running! Skip FW dnld\n");
+ mwifiex_dbg(adapter, MSG,
+ "WLAN FW already running! Skip FW dnld\n");
return 0;
}
@@ -750,8 +758,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
/* check if we are the winner for downloading FW */
if (!adapter->winner) {
- dev_notice(adapter->dev,
- "FW already running! Skip FW dnld\n");
+ mwifiex_dbg(adapter, MSG,
+ "FW already running! Skip FW dnld\n");
goto poll_fw;
}
}
@@ -760,7 +768,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
/* Download firmware with helper */
ret = adapter->if_ops.prog_fw(adapter, pmfw);
if (ret) {
- dev_err(adapter->dev, "prog_fw failed ret=%#x\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "prog_fw failed ret=%#x\n", ret);
return ret;
}
}
@@ -769,7 +778,8 @@ poll_fw:
/* Check if the firmware is downloaded successfully or not */
ret = adapter->if_ops.check_fw_status(adapter, poll_num);
if (ret)
- dev_err(adapter->dev, "FW failed to be active in time\n");
+ mwifiex_dbg(adapter, ERROR,
+ "FW failed to be active in time\n");
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index d2b05c3a96da..6f11a25a6b49 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -189,6 +189,7 @@ struct tdls_peer_info {
};
struct mwifiex_debug_info {
+ unsigned int debug_mask;
u32 int_counter;
u32 packets_out[MAX_NUM_TID];
u32 tx_buf_size;
@@ -342,6 +343,11 @@ struct mwifiex_ds_read_eeprom {
u8 value[MAX_EEPROM_DATA];
};
+struct mwifiex_ds_mem_rw {
+ u32 addr;
+ u32 value;
+};
+
#define IEEE_MAX_IE_SIZE 256
#define MWIFIEX_IE_HDR_SIZE (sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE)
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 411a6c2f4aca..cce8e39aa45e 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -53,9 +53,9 @@ mwifiex_cmd_append_generic_ie(struct mwifiex_private *priv, u8 **buffer)
* parameter buffer pointer.
*/
if (priv->gen_ie_buf_len) {
- dev_dbg(priv->adapter->dev,
- "info: %s: append generic ie len %d to %p\n",
- __func__, priv->gen_ie_buf_len, *buffer);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: append generic ie len %d to %p\n",
+ __func__, priv->gen_ie_buf_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
@@ -125,9 +125,9 @@ mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer,
tsf_val = cpu_to_le64(bss_desc->timestamp);
- dev_dbg(priv->adapter->dev,
- "info: %s: TSF offset calc: %016llx - %016llx\n",
- __func__, bss_desc->timestamp, bss_desc->fw_tsf);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: TSF offset calc: %016llx - %016llx\n",
+ __func__, bss_desc->timestamp, bss_desc->fw_tsf);
memcpy(*buffer, &tsf_val, sizeof(tsf_val));
*buffer += sizeof(tsf_val);
@@ -152,7 +152,7 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
tmp = kmemdup(rate1, rate1_size, GFP_KERNEL);
if (!tmp) {
- dev_err(priv->adapter->dev, "failed to alloc tmp buf\n");
+ mwifiex_dbg(priv->adapter, ERROR, "failed to alloc tmp buf\n");
return -ENOMEM;
}
@@ -169,8 +169,8 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
}
}
- dev_dbg(priv->adapter->dev, "info: Tx data rate set to %#x\n",
- priv->data_rate);
+ mwifiex_dbg(priv->adapter, INFO, "info: Tx data rate set to %#x\n",
+ priv->data_rate);
if (!priv->is_data_rate_auto) {
while (*ptr) {
@@ -180,9 +180,10 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
}
ptr++;
}
- dev_err(priv->adapter->dev, "previously set fixed data rate %#x"
- " is not compatible with the network\n",
- priv->data_rate);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "previously set fixed data rate %#x\t"
+ "is not compatible with the network\n",
+ priv->data_rate);
ret = -1;
goto done;
@@ -214,8 +215,9 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
if (mwifiex_get_common_rates(priv, out_rates, MWIFIEX_SUPPORTED_RATES,
card_rates, card_rates_size)) {
*out_rates_size = 0;
- dev_err(priv->adapter->dev, "%s: cannot get common rates\n",
- __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: cannot get common rates\n",
+ __func__);
return -1;
}
@@ -246,8 +248,9 @@ mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer)
* parameter buffer pointer.
*/
if (priv->wps_ie_len) {
- dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n",
- priv->wps_ie_len, *buffer);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: append wps ie %d to %p\n",
+ priv->wps_ie_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE);
@@ -292,8 +295,9 @@ mwifiex_cmd_append_wapi_ie(struct mwifiex_private *priv, u8 **buffer)
* parameter buffer pointer.
*/
if (priv->wapi_ie_len) {
- dev_dbg(priv->adapter->dev, "cmd: append wapi ie %d to %p\n",
- priv->wapi_ie_len, *buffer);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: append wapi ie %d to %p\n",
+ priv->wapi_ie_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE);
@@ -453,8 +457,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
rates_tlv->header.len = cpu_to_le16((u16) rates_size);
memcpy(rates_tlv->rates, rates, rates_size);
pos += sizeof(rates_tlv->header) + rates_size;
- dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n",
- rates_size);
+ mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_CMD: rates size = %d\n",
+ rates_size);
/* Add the Authentication type to be used for Auth frames */
auth_tlv = (struct mwifiex_ie_types_auth_type *) pos;
@@ -487,14 +491,14 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_scan_param_set));
chan_tlv->chan_scan_param[0].chan_number =
(bss_desc->phy_param_set.ds_param_set.current_chan);
- dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n",
- chan_tlv->chan_scan_param[0].chan_number);
+ mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Chan = %d\n",
+ chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type =
mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
- dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n",
- chan_tlv->chan_scan_param[0].radio_type);
+ mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Band = %d\n",
+ chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
}
@@ -544,8 +548,9 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
tmp_cap &= CAPINFO_MASK;
- dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
- tmp_cap, CAPINFO_MASK);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
+ tmp_cap, CAPINFO_MASK);
assoc->cap_info_bitmap = cpu_to_le16(tmp_cap);
return 0;
@@ -621,23 +626,35 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct ieee_types_assoc_rsp *assoc_rsp;
struct mwifiex_bssdescriptor *bss_desc;
bool enable_data = true;
- u16 cap_info, status_code;
+ u16 cap_info, status_code, aid;
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
status_code = le16_to_cpu(assoc_rsp->status_code);
+ aid = le16_to_cpu(assoc_rsp->a_id);
+
+ if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
+ dev_err(priv->adapter->dev,
+ "invalid AID value 0x%x; bits 15:14 not set\n",
+ aid);
+
+ aid &= ~(BIT(15) | BIT(14));
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
sizeof(priv->assoc_rsp_buf));
memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
+ assoc_rsp->a_id = cpu_to_le16(aid);
+
if (status_code) {
priv->adapter->dbg.num_cmd_assoc_failure++;
- dev_err(priv->adapter->dev,
- "ASSOC_RESP: failed, status code=%d err=%#x a_id=%#x\n",
- status_code, cap_info, le16_to_cpu(assoc_rsp->a_id));
+ mwifiex_dbg(priv->adapter, ERROR,
+ "ASSOC_RESP: failed,\t"
+ "status code=%d err=%#x a_id=%#x\n",
+ status_code, cap_info,
+ le16_to_cpu(assoc_rsp->a_id));
if (cap_info == MWIFIEX_TIMEOUT_FOR_AP_RESP) {
if (status_code == MWIFIEX_STATUS_CODE_AUTH_TIMEOUT)
@@ -661,8 +678,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
/* Set the attempted BSSID Index to current */
bss_desc = priv->attempted_bss_desc;
- dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n",
- bss_desc->ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: %s\n",
+ bss_desc->ssid.ssid);
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor,
@@ -692,8 +709,9 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
= ((bss_desc->wmm_ie.qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
- dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
- priv->curr_pkt_filter);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
+ priv->curr_pkt_filter);
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
priv->wpa_is_gtk_set = false;
@@ -709,8 +727,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
}
if (enable_data)
- dev_dbg(priv->adapter->dev,
- "info: post association, re-enabling data flow\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: post association, re-enabling data flow\n");
/* Reset SNR/NF/RSSI values */
priv->data_rssi_last = 0;
@@ -728,7 +746,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
priv->adapter->dbg.num_cmd_assoc_success++;
- dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: associated\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n");
/* Add the ra_list here for infra mode as there will be only 1 ra
always */
@@ -815,8 +833,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n",
- adhoc_start->ssid);
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
+ adhoc_start->ssid);
memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
memcpy(bss_desc->ssid.ssid, req_ssid->ssid, req_ssid->ssid_len);
@@ -848,12 +866,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
if (!priv->adhoc_channel) {
- dev_err(adapter->dev, "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
+ mwifiex_dbg(adapter, ERROR,
+ "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
return -1;
}
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
- priv->adhoc_channel);
+ mwifiex_dbg(adapter, INFO,
+ "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
+ priv->adhoc_channel);
priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel;
priv->curr_bss_params.band = adapter->adhoc_start_band;
@@ -885,13 +905,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
/* Set up privacy in bss_desc */
if (priv->sec_info.encryption_mode) {
/* Ad-Hoc capability privacy on */
- dev_dbg(adapter->dev,
- "info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
tmp_cap |= WLAN_CAPABILITY_PRIVACY;
} else {
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: wep_status NOT set,"
- " setting privacy to ACCEPT ALL\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: ADHOC_S_CMD: wep_status NOT set,\t"
+ "setting privacy to ACCEPT ALL\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
}
@@ -902,8 +923,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&priv->curr_pkt_filter, false)) {
- dev_err(adapter->dev,
- "ADHOC_S_CMD: G Protection config failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "ADHOC_S_CMD: G Protection config failed\n");
return -1;
}
}
@@ -918,10 +939,10 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
memcpy(&priv->curr_bss_params.data_rates,
&adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n",
- adhoc_start->data_rate);
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: rates=%4ph\n",
+ adhoc_start->data_rate);
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
if (IS_SUPPORT_MULTI_BANDS(adapter)) {
/* Append a channel TLV */
@@ -935,8 +956,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
chan_tlv->chan_scan_param[0].chan_number =
(u8) priv->curr_bss_params.bss_descriptor.channel;
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n",
- chan_tlv->chan_scan_param[0].chan_number);
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Chan = %d\n",
+ chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type
= mwifiex_band_to_radio_type(priv->curr_bss_params.band);
@@ -951,8 +972,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
chan_tlv->chan_scan_param[0].radio_type |=
(IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
}
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
- chan_tlv->chan_scan_param[0].radio_type);
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Band = %d\n",
+ chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
cmd_append_size +=
@@ -1074,8 +1095,8 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&curr_pkt_filter, false)) {
- dev_err(priv->adapter->dev,
- "ADHOC_J_CMD: G Protection config failed\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "ADHOC_J_CMD: G Protection config failed\n");
return -1;
}
}
@@ -1106,14 +1127,15 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
tmp_cap &= CAPINFO_MASK;
- dev_dbg(priv->adapter->dev,
- "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
- tmp_cap, CAPINFO_MASK);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
+ tmp_cap, CAPINFO_MASK);
/* Information on BSSID descriptor passed to FW */
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
- adhoc_join->bss_descriptor.bssid,
- adhoc_join->bss_descriptor.ssid);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
+ adhoc_join->bss_descriptor.bssid,
+ adhoc_join->bss_descriptor.ssid);
for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
bss_desc->supported_rates[i]; i++)
@@ -1149,14 +1171,14 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_scan_param_set));
chan_tlv->chan_scan_param[0].chan_number =
(bss_desc->phy_param_set.ds_param_set.current_chan);
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan=%d\n",
- chan_tlv->chan_scan_param[0].chan_number);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Chan=%d\n",
+ chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type =
mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band=%d\n",
- chan_tlv->chan_scan_param[0].radio_type);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Band=%d\n",
+ chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
cmd_append_size += sizeof(chan_tlv->header) +
@@ -1210,7 +1232,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
/* Join result code 0 --> SUCCESS */
reason_code = le16_to_cpu(resp->result);
if (reason_code) {
- dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n");
+ mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n");
if (priv->media_connected)
mwifiex_reset_connect_state(priv, reason_code);
@@ -1225,8 +1247,8 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
priv->media_connected = true;
if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) {
- dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n",
- bss_desc->ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_S_RESP %s\n",
+ bss_desc->ssid.ssid);
/* Update the created network descriptor with the new BSSID */
memcpy(bss_desc->mac_address,
@@ -1238,8 +1260,9 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
* Now the join cmd should be successful.
* If BSSID has changed use SSID to compare instead of BSSID
*/
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n",
- bss_desc->ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ADHOC_J_RESP %s\n",
+ bss_desc->ssid.ssid);
/*
* Make a copy of current BSSID descriptor, only needed for
@@ -1252,10 +1275,10 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
priv->adhoc_state = ADHOC_JOINED;
}
- dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n",
- priv->adhoc_channel);
- dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n",
- priv->curr_bss_params.bss_descriptor.mac_address);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: channel = %d\n",
+ priv->adhoc_channel);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: BSSID = %pM\n",
+ priv->curr_bss_params.bss_descriptor.mac_address);
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
@@ -1317,12 +1340,12 @@ int
mwifiex_adhoc_start(struct mwifiex_private *priv,
struct cfg80211_ssid *adhoc_ssid)
{
- dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n",
- priv->adhoc_channel);
- dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
- priv->curr_bss_params.bss_descriptor.channel);
- dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n",
- priv->curr_bss_params.band);
+ mwifiex_dbg(priv->adapter, INFO, "info: Adhoc Channel = %d\n",
+ priv->adhoc_channel);
+ mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.channel = %d\n",
+ priv->curr_bss_params.bss_descriptor.channel);
+ mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.band = %d\n",
+ priv->curr_bss_params.band);
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->config_bands & BAND_AAC)
@@ -1343,14 +1366,16 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
int mwifiex_adhoc_join(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
- dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid =%s\n",
- priv->curr_bss_params.bss_descriptor.ssid.ssid);
- dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid_len =%u\n",
- priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
- dev_dbg(priv->adapter->dev, "info: adhoc join: ssid =%s\n",
- bss_desc->ssid.ssid);
- dev_dbg(priv->adapter->dev, "info: adhoc join: ssid_len =%u\n",
- bss_desc->ssid.ssid_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: adhoc join: curr_bss ssid =%s\n",
+ priv->curr_bss_params.bss_descriptor.ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: adhoc join: curr_bss ssid_len =%u\n",
+ priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+ mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid =%s\n",
+ bss_desc->ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid_len =%u\n",
+ bss_desc->ssid.ssid_len);
/* Check if the requested SSID is already joined */
if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len &&
@@ -1358,8 +1383,9 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
&priv->curr_bss_params.bss_descriptor.ssid) &&
(priv->curr_bss_params.bss_descriptor.bss_mode ==
NL80211_IFTYPE_ADHOC)) {
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: new ad-hoc SSID"
- " is the same as current; not attempting to re-join\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ADHOC_J_CMD: new ad-hoc SSID\t"
+ "is the same as current; not attempting to re-join\n");
return -1;
}
@@ -1370,10 +1396,12 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
else
mwifiex_set_ba_params(priv);
- dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
- priv->curr_bss_params.bss_descriptor.channel);
- dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
- priv->curr_bss_params.band);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: curr_bss_params.channel = %d\n",
+ priv->curr_bss_params.bss_descriptor.channel);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: curr_bss_params.band = %c\n",
+ priv->curr_bss_params.band);
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
HostCmd_ACT_GEN_SET, 0, bss_desc, true);
@@ -1421,7 +1449,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
ret = mwifiex_deauthenticate_infra(priv, mac);
if (ret)
cfg80211_disconnected(priv->netdev, 0, NULL, 0,
- GFP_KERNEL);
+ true, GFP_KERNEL);
break;
case NL80211_IFTYPE_ADHOC:
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 03a95c7d34bf..3ba4e0e04223 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -24,6 +24,10 @@
#define VERSION "1.0"
+static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK;
+module_param(debug_mask, uint, 0);
+MODULE_PARM_DESC(debug_mask, "bitmap for debug flags");
+
const char driver_version[] = "mwifiex " VERSION " (%s) ";
static char *cal_data_cfg;
module_param(cal_data_cfg, charp, 0);
@@ -63,6 +67,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
/* Save interface specific operations in adapter */
memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
+ adapter->debug_mask = debug_mask;
/* card specific initialization has been deferred until now .. */
if (adapter->if_ops.init_if)
@@ -89,7 +94,8 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
return 0;
error:
- dev_dbg(adapter->dev, "info: leave mwifiex_register with error\n");
+ mwifiex_dbg(adapter, ERROR,
+ "info: leave mwifiex_register with error\n");
for (i = 0; i < adapter->priv_num; i++)
kfree(adapter->priv[i]);
@@ -231,11 +237,10 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
goto exit_main_proc;
} else {
adapter->mwifiex_processing = true;
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
}
process_start:
do {
- adapter->more_task_flag = false;
- spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
(adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
break;
@@ -275,7 +280,6 @@ process_start:
adapter->pm_wakeup_fw_try = true;
mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
adapter->if_ops.wakeup(adapter);
- spin_lock_irqsave(&adapter->main_proc_lock, flags);
continue;
}
@@ -335,7 +339,6 @@ process_start:
(adapter->ps_state == PS_STATE_PRE_SLEEP) ||
(adapter->ps_state == PS_STATE_SLEEP_CFM) ||
adapter->tx_lock_flag){
- spin_lock_irqsave(&adapter->main_proc_lock, flags);
continue;
}
@@ -386,12 +389,14 @@ process_start:
}
break;
}
- spin_lock_irqsave(&adapter->main_proc_lock, flags);
} while (true);
spin_lock_irqsave(&adapter->main_proc_lock, flags);
- if (adapter->more_task_flag)
+ if (adapter->more_task_flag) {
+ adapter->more_task_flag = false;
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
goto process_start;
+ }
adapter->mwifiex_processing = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
@@ -455,8 +460,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
struct wireless_dev *wdev;
if (!firmware) {
- dev_err(adapter->dev,
- "Failed to get firmware %s\n", adapter->fw_name);
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to get firmware %s\n", adapter->fw_name);
goto err_dnld_fw;
}
@@ -472,13 +477,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (ret == -1)
goto err_dnld_fw;
- dev_notice(adapter->dev, "WLAN FW is active\n");
+ mwifiex_dbg(adapter, MSG, "WLAN FW is active\n");
if (cal_data_cfg) {
if ((request_firmware(&adapter->cal_data, cal_data_cfg,
adapter->dev)) < 0)
- dev_err(adapter->dev,
- "Cal data request_firmware() failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Cal data request_firmware() failed\n");
}
/* enable host interrupt after fw dnld is successful */
@@ -503,12 +508,14 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
if (mwifiex_register_cfg80211(adapter)) {
- dev_err(adapter->dev, "cannot register with cfg80211\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register with cfg80211\n");
goto err_init_fw;
}
if (mwifiex_init_channel_scan_gap(adapter)) {
- dev_err(adapter->dev, "could not init channel stats table\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not init channel stats table\n");
goto err_init_fw;
}
@@ -522,7 +529,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
NL80211_IFTYPE_STATION, NULL, NULL);
if (IS_ERR(wdev)) {
- dev_err(adapter->dev, "cannot create default STA interface\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create default STA interface\n");
rtnl_unlock();
goto err_add_intf;
}
@@ -531,7 +539,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM,
NL80211_IFTYPE_AP, NULL, NULL);
if (IS_ERR(wdev)) {
- dev_err(adapter->dev, "cannot create AP interface\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create AP interface\n");
rtnl_unlock();
goto err_add_intf;
}
@@ -542,8 +551,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
NL80211_IFTYPE_P2P_CLIENT, NULL,
NULL);
if (IS_ERR(wdev)) {
- dev_err(adapter->dev,
- "cannot create p2p client interface\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create p2p client interface\n");
rtnl_unlock();
goto err_add_intf;
}
@@ -551,7 +560,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
- dev_notice(adapter->dev, "driver_version = %s\n", fmt);
+ mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
goto done;
err_add_intf:
@@ -561,7 +570,8 @@ err_init_fw:
if (adapter->if_ops.disable_int)
adapter->if_ops.disable_int(adapter);
err_dnld_fw:
- pr_debug("info: %s: unregister device\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "info: %s: unregister device\n", __func__);
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
@@ -602,8 +612,8 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
adapter->dev, GFP_KERNEL, adapter,
mwifiex_fw_dpc);
if (ret < 0)
- dev_err(adapter->dev,
- "request_firmware_nowait() returned error %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "request_firmware_nowait error %d\n", ret);
return ret;
}
@@ -629,7 +639,8 @@ mwifiex_close(struct net_device *dev)
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (priv->scan_request) {
- dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "aborting scan on ndo_stop\n");
cfg80211_scan_done(priv->scan_request, 1);
priv->scan_request = NULL;
priv->scan_aborting = true;
@@ -650,7 +661,8 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
txq = netdev_get_tx_queue(priv->netdev, index);
if (!netif_tx_queue_stopped(txq)) {
netif_tx_stop_queue(txq);
- dev_dbg(priv->adapter->dev, "stop queue: %d\n", index);
+ mwifiex_dbg(priv->adapter, DATA,
+ "stop queue: %d\n", index);
}
}
@@ -715,8 +727,9 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mwifiex_txinfo *tx_info;
bool multicast;
- dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
- jiffies, priv->bss_type, priv->bss_num);
+ mwifiex_dbg(priv->adapter, DATA,
+ "data: %lu BSS(%d-%d): Data <= kernel\n",
+ jiffies, priv->bss_type, priv->bss_num);
if (priv->adapter->surprise_removed) {
kfree_skb(skb);
@@ -724,28 +737,31 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
- dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Tx: bad skb len %d\n", skb->len);
kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
}
if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
- dev_dbg(priv->adapter->dev,
- "data: Tx: insufficient skb headroom %d\n",
- skb_headroom(skb));
+ mwifiex_dbg(priv->adapter, DATA,
+ "data: Tx: insufficient skb headroom %d\n",
+ skb_headroom(skb));
/* Insufficient skb headroom - allocate a new skb */
new_skb =
skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
if (unlikely(!new_skb)) {
- dev_err(priv->adapter->dev, "Tx: cannot alloca new_skb\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Tx: cannot alloca new_skb\n");
kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
}
kfree_skb(skb);
skb = new_skb;
- dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n",
- skb_headroom(skb));
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: new skb headroomd %d\n",
+ skb_headroom(skb));
}
tx_info = MWIFIEX_SKB_TXCB(skb);
@@ -803,8 +819,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr)
if (!ret)
memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
else
- dev_err(priv->adapter->dev,
- "set mac address failed: ret=%d\n", ret);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "set mac address failed: ret=%d\n", ret);
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
@@ -842,20 +858,22 @@ mwifiex_tx_timeout(struct net_device *dev)
priv->num_tx_timeout++;
priv->tx_timeout_cnt++;
- dev_err(priv->adapter->dev,
- "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
- jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
+ jiffies, priv->tx_timeout_cnt, priv->bss_type,
+ priv->bss_num);
mwifiex_set_trans_start(dev);
if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD &&
priv->adapter->if_ops.card_reset) {
- dev_err(priv->adapter->dev,
- "tx_timeout_cnt exceeds threshold. Triggering card reset!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tx_timeout_cnt exceeds threshold.\t"
+ "Triggering card reset!\n");
priv->adapter->if_ops.card_reset(priv->adapter);
}
}
-void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
{
void *p;
char drv_version[64];
@@ -868,10 +886,11 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
if (adapter->drv_info_dump) {
vfree(adapter->drv_info_dump);
+ adapter->drv_info_dump = NULL;
adapter->drv_info_size = 0;
}
- dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n");
+ mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
@@ -939,12 +958,12 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
}
if (adapter->iface_type == MWIFIEX_SDIO) {
- p += sprintf(p, "\n=== SDIO register DUMP===\n");
+ p += sprintf(p, "\n=== SDIO register dump===\n");
if (adapter->if_ops.reg_dump)
p += adapter->if_ops.reg_dump(adapter, p);
}
- p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n");
+ p += sprintf(p, "\n=== more debug information\n");
debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
if (debug_info) {
for (i = 0; i < adapter->priv_num; i++) {
@@ -959,9 +978,99 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
}
adapter->drv_info_size = p - adapter->drv_info_dump;
- dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n");
+ mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
+}
+EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
+
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
+{
+ u8 idx, *dump_data, *fw_dump_ptr;
+ u32 dump_len;
+
+ dump_len = (strlen("========Start dump driverinfo========\n") +
+ adapter->drv_info_size +
+ strlen("\n========End dump========\n"));
+
+ for (idx = 0; idx < adapter->num_mem_types; idx++) {
+ struct memory_type_mapping *entry =
+ &adapter->mem_type_mapping_tbl[idx];
+
+ if (entry->mem_ptr) {
+ dump_len += (strlen("========Start dump ") +
+ strlen(entry->mem_name) +
+ strlen("========\n") +
+ (entry->mem_size + 1) +
+ strlen("\n========End dump========\n"));
+ }
+ }
+
+ dump_data = vzalloc(dump_len + 1);
+ if (!dump_data)
+ goto done;
+
+ fw_dump_ptr = dump_data;
+
+	/* Dump all the memory data into a single file; a userspace script
+	 * will split it into multiple files later
+	 */
+ mwifiex_dbg(adapter, MSG,
+ "== mwifiex dump information to /sys/class/devcoredump start");
+
+ strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
+ fw_dump_ptr += strlen("========Start dump driverinfo========\n");
+ memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size);
+ fw_dump_ptr += adapter->drv_info_size;
+ strcpy(fw_dump_ptr, "\n========End dump========\n");
+ fw_dump_ptr += strlen("\n========End dump========\n");
+
+ for (idx = 0; idx < adapter->num_mem_types; idx++) {
+ struct memory_type_mapping *entry =
+ &adapter->mem_type_mapping_tbl[idx];
+
+ if (entry->mem_ptr) {
+ strcpy(fw_dump_ptr, "========Start dump ");
+ fw_dump_ptr += strlen("========Start dump ");
+
+ strcpy(fw_dump_ptr, entry->mem_name);
+ fw_dump_ptr += strlen(entry->mem_name);
+
+ strcpy(fw_dump_ptr, "========\n");
+ fw_dump_ptr += strlen("========\n");
+
+ memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size);
+ fw_dump_ptr += entry->mem_size;
+
+ strcpy(fw_dump_ptr, "\n========End dump========\n");
+ fw_dump_ptr += strlen("\n========End dump========\n");
+ }
+ }
+
+	/* device dump data will be freed by the device coredump release
+	 * function after 5 min
+	 */
+ dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
+ mwifiex_dbg(adapter, MSG,
+ "== mwifiex dump information to /sys/class/devcoredump end");
+
+done:
+ for (idx = 0; idx < adapter->num_mem_types; idx++) {
+ struct memory_type_mapping *entry =
+ &adapter->mem_type_mapping_tbl[idx];
+
+ if (entry->mem_ptr) {
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = NULL;
+ }
+ entry->mem_size = 0;
+ }
+
+ if (adapter->drv_info_dump) {
+ vfree(adapter->drv_info_dump);
+ adapter->drv_info_dump = NULL;
+ adapter->drv_info_size = 0;
+ }
}
-EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info);
+EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
/*
* CFG802.11 network device handler for statistics retrieval.
@@ -1230,21 +1339,24 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
}
}
- dev_dbg(adapter->dev, "cmd: calling mwifiex_shutdown_drv...\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: calling mwifiex_shutdown_drv...\n");
adapter->init_wait_q_woken = false;
if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
wait_event_interruptible(adapter->init_wait_q,
adapter->init_wait_q_woken);
- dev_dbg(adapter->dev, "cmd: mwifiex_shutdown_drv done\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: mwifiex_shutdown_drv done\n");
if (atomic_read(&adapter->rx_pending) ||
atomic_read(&adapter->tx_pending) ||
atomic_read(&adapter->cmd_pending)) {
- dev_err(adapter->dev, "rx_pending=%d, tx_pending=%d, "
- "cmd_pending=%d\n",
- atomic_read(&adapter->rx_pending),
- atomic_read(&adapter->tx_pending),
- atomic_read(&adapter->cmd_pending));
+ mwifiex_dbg(adapter, ERROR,
+ "rx_pending=%d, tx_pending=%d,\t"
+ "cmd_pending=%d\n",
+ atomic_read(&adapter->rx_pending),
+ atomic_read(&adapter->tx_pending),
+ atomic_read(&adapter->cmd_pending));
}
for (i = 0; i < adapter->priv_num; i++) {
@@ -1264,11 +1376,13 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
wiphy_free(adapter->wiphy);
/* Unregister device */
- dev_dbg(adapter->dev, "info: unregister device\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: unregister device\n");
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
/* Free adapter structure */
- dev_dbg(adapter->dev, "info: free adapter\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: free adapter\n");
mwifiex_free_adapter(adapter);
exit_remove:
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index fe1256044a6c..5a6c1c76b33b 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -36,6 +36,7 @@
#include <linux/of.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
+#include <linux/devcoredump.h>
#include "decl.h"
#include "ioctl.h"
@@ -147,6 +148,54 @@ enum {
/* Address alignment */
#define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))
+/**
+ * enum MWIFIEX_DEBUG_LEVEL - Marvell WiFi debug levels
+ */
+enum MWIFIEX_DEBUG_LEVEL {
+ MWIFIEX_DBG_MSG = 0x00000001,
+ MWIFIEX_DBG_FATAL = 0x00000002,
+ MWIFIEX_DBG_ERROR = 0x00000004,
+ MWIFIEX_DBG_DATA = 0x00000008,
+ MWIFIEX_DBG_CMD = 0x00000010,
+ MWIFIEX_DBG_EVENT = 0x00000020,
+ MWIFIEX_DBG_INTR = 0x00000040,
+ MWIFIEX_DBG_IOCTL = 0x00000080,
+
+ MWIFIEX_DBG_MPA_D = 0x00008000,
+ MWIFIEX_DBG_DAT_D = 0x00010000,
+ MWIFIEX_DBG_CMD_D = 0x00020000,
+ MWIFIEX_DBG_EVT_D = 0x00040000,
+ MWIFIEX_DBG_FW_D = 0x00080000,
+ MWIFIEX_DBG_IF_D = 0x00100000,
+
+ MWIFIEX_DBG_ENTRY = 0x10000000,
+ MWIFIEX_DBG_WARN = 0x20000000,
+ MWIFIEX_DBG_INFO = 0x40000000,
+ MWIFIEX_DBG_DUMP = 0x80000000,
+
+ MWIFIEX_DBG_ANY = 0xffffffff
+};
+
+#define MWIFIEX_DEFAULT_DEBUG_MASK (MWIFIEX_DBG_MSG | \
+ MWIFIEX_DBG_FATAL | \
+ MWIFIEX_DBG_ERROR)
+
+#define mwifiex_dbg(adapter, dbg_mask, fmt, args...) \
+do { \
+ if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \
+ if ((adapter)->dev) \
+ dev_info((adapter)->dev, fmt, ## args); \
+} while (0)
+
+#define DEBUG_DUMP_DATA_MAX_LEN 128
+#define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len) \
+do { \
+ if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \
+ print_hex_dump(KERN_DEBUG, str, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, len, false); \
+} while (0)
+
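
Every dev_* conversion in this patch funnels through the two macros above: mwifiex_dbg() token-pastes the level name onto MWIFIEX_DBG_ and tests the adapter's runtime debug_mask before calling dev_info(), and mwifiex_dbg_dump() gates print_hex_dump() the same way, so verbosity becomes a per-adapter runtime knob rather than a compile-time choice. A runnable userspace reduction of the masking idea (all names here are stand-ins):

#include <stdio.h>

enum dbg_level {
	DBG_MSG   = 0x00000001,
	DBG_ERROR = 0x00000004,
	DBG_INFO  = 0x40000000,
};

struct adapter {
	unsigned int debug_mask;
};

/* Token-pasting mask check, same shape as mwifiex_dbg(). */
#define adbg(adapter, dbg_mask, fmt, ...)				\
do {									\
	if ((adapter)->debug_mask & DBG_##dbg_mask)			\
		printf(fmt, ##__VA_ARGS__);				\
} while (0)

int main(void)
{
	struct adapter a = { .debug_mask = DBG_MSG | DBG_ERROR };

	adbg(&a, ERROR, "printed: the ERROR bit is set\n");
	adbg(&a, INFO, "suppressed: the INFO bit is clear\n");
	return 0;
}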
struct mwifiex_dbg {
u32 num_cmd_host_to_card_failure;
u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -451,7 +500,7 @@ enum rdwr_status {
};
enum mwifiex_iface_work_flags {
- MWIFIEX_IFACE_WORK_FW_DUMP,
+ MWIFIEX_IFACE_WORK_DEVICE_DUMP,
MWIFIEX_IFACE_WORK_CARD_RESET,
};
@@ -611,6 +660,7 @@ struct mwifiex_private {
struct delayed_work dfs_chan_sw_work;
struct cfg80211_beacon_data beacon_after;
struct mwifiex_11h_intf_state state_11h;
+ struct mwifiex_ds_mem_rw mem_rw;
};
@@ -740,8 +790,8 @@ struct mwifiex_if_ops {
int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
- void (*fw_dump)(struct mwifiex_adapter *);
int (*reg_dump)(struct mwifiex_adapter *, char *);
+ void (*device_dump)(struct mwifiex_adapter *);
int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
void (*iface_work)(struct work_struct *work);
void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
@@ -750,6 +800,7 @@ struct mwifiex_if_ops {
struct mwifiex_adapter {
u8 iface_type;
+ unsigned int debug_mask;
struct mwifiex_iface_comb iface_limit;
struct mwifiex_iface_comb curr_iface_comb;
struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
@@ -900,7 +951,6 @@ struct mwifiex_adapter {
u8 key_api_major_ver, key_api_minor_ver;
struct memory_type_mapping *mem_type_mapping_tbl;
u8 num_mem_types;
- u8 curr_mem_idx;
void *drv_info_dump;
u32 drv_info_size;
bool scan_chan_gap_enabled;
@@ -1434,7 +1484,8 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv,
u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
u8 rx_rate, u8 ht_info);
-void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index bcc7751d883c..77b9055a2d14 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -57,7 +57,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
if (pci_dma_mapping_error(card->dev, mapping.addr)) {
- dev_err(adapter->dev, "failed to map pci memory!\n");
+ mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n");
return -1;
}
mapping.len = size;
@@ -89,8 +89,9 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
if (card->sleep_cookie_vbase) {
cookie_addr = (u32 *)card->sleep_cookie_vbase;
- dev_dbg(adapter->dev, "info: ACCESS_HW: sleep cookie=0x%x\n",
- *cookie_addr);
+ mwifiex_dbg(adapter, INFO,
+ "info: ACCESS_HW: sleep cookie=0x%x\n",
+ *cookie_addr);
if (*cookie_addr == FW_AWAKE_COOKIE)
return true;
}
@@ -164,7 +165,8 @@ static int mwifiex_pcie_resume(struct device *dev)
adapter = card->adapter;
if (!adapter->is_suspended) {
- dev_warn(adapter->dev, "Device already resumed\n");
+ mwifiex_dbg(adapter, WARN,
+ "Device already resumed\n");
return 0;
}
@@ -361,16 +363,16 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
sleep_cookie = *(u32 *)buffer;
if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
- dev_dbg(adapter->dev,
- "sleep cookie found at count %d\n", count);
+ mwifiex_dbg(adapter, INFO,
+ "sleep cookie found at count %d\n", count);
break;
}
usleep_range(20, 30);
}
if (count >= max_delay_loop_cnt)
- dev_dbg(adapter->dev,
- "max count reached while accessing sleep cookie\n");
+ mwifiex_dbg(adapter, INFO,
+ "max count reached while accessing sleep cookie\n");
}
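
mwifiex_delay_for_sleep_cookie() above is a bounded poll: re-read the word the device DMA-writes, sleep 20-30 us between attempts, and give up after a fixed loop count instead of blocking indefinitely. A compact, runnable sketch of the idiom (constants and names hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define DEF_SLEEP_COOKIE 0xBEEFBEEFu	/* assumed sentinel value */
#define MAX_POLL 100

/* Bounded poll on a device-written word; returns false if the
 * sentinel never appears, like the "max count reached" path above. */
static bool wait_for_cookie(volatile const uint32_t *cookie)
{
	int count;

	for (count = 0; count < MAX_POLL; count++) {
		if (*cookie == DEF_SLEEP_COOKIE)
			return true;
		usleep(25);	/* usleep_range(20, 30) in the driver */
	}
	return false;
}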
/* This function wakes up the card by reading fw_status register. */
@@ -380,20 +382,23 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- dev_dbg(adapter->dev, "event: Wakeup device...\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: Wakeup device...\n");
if (reg->sleep_cookie)
mwifiex_pcie_dev_wakeup_delay(adapter);
/* Reading fw_status register will wakeup device */
if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) {
- dev_warn(adapter->dev, "Reading fw_status register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Reading fw_status register failed\n");
return -1;
}
if (reg->sleep_cookie) {
mwifiex_pcie_dev_wakeup_delay(adapter);
- dev_dbg(adapter->dev, "PCIE wakeup: Setting PS_STATE_AWAKE\n");
+ mwifiex_dbg(adapter, INFO,
+ "PCIE wakeup: Setting PS_STATE_AWAKE\n");
adapter->ps_state = PS_STATE_AWAKE;
}
@@ -407,7 +412,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
{
- dev_dbg(adapter->dev, "cmd: Wakeup device completed\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Wakeup device completed\n");
return 0;
}
@@ -423,7 +429,8 @@ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter)
if (mwifiex_pcie_ok_to_access_hw(adapter)) {
if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
0x00000000)) {
- dev_warn(adapter->dev, "Disable host interrupt failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Disable host interrupt failed\n");
return -1;
}
}
@@ -443,7 +450,8 @@ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter)
/* Simply write the mask to the register */
if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
HOST_INTR_MASK)) {
- dev_warn(adapter->dev, "Enable host interrupt failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Enable host interrupt failed\n");
return -1;
}
}
@@ -499,8 +507,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
GFP_KERNEL | GFP_DMA);
if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for RX ring.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Unable to allocate skb for RX ring.\n");
kfree(card->rxbd_ring_vbase);
return -ENOMEM;
}
@@ -512,10 +520,10 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
- dev_dbg(adapter->dev,
- "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
- skb, skb->len, skb->data, (u32)buf_pa,
- (u32)((u64)buf_pa >> 32));
+ mwifiex_dbg(adapter, INFO,
+ "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+ skb, skb->len, skb->data, (u32)buf_pa,
+ (u32)((u64)buf_pa >> 32));
card->rx_buf_list[i] = skb;
if (reg->pfu_enabled) {
@@ -556,8 +564,8 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
/* Allocate skb here so that firmware can DMA data from it */
skb = dev_alloc_skb(MAX_EVENT_SIZE);
if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for EVENT buf.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Unable to allocate skb for EVENT buf.\n");
kfree(card->evtbd_ring_vbase);
return -ENOMEM;
}
@@ -569,10 +577,10 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
- dev_dbg(adapter->dev,
- "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
- skb, skb->len, skb->data, (u32)buf_pa,
- (u32)((u64)buf_pa >> 32));
+ mwifiex_dbg(adapter, EVENT,
+ "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+ skb, skb->len, skb->data, (u32)buf_pa,
+ (u32)((u64)buf_pa >> 32));
card->evt_buf_list[i] = skb;
card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase +
@@ -715,21 +723,23 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
MWIFIEX_MAX_TXRX_BD;
- dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n",
- card->txbd_ring_size);
+ mwifiex_dbg(adapter, INFO,
+ "info: txbd_ring: Allocating %d bytes\n",
+ card->txbd_ring_size);
card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
card->txbd_ring_size,
&card->txbd_ring_pbase);
if (!card->txbd_ring_vbase) {
- dev_err(adapter->dev,
- "allocate consistent memory (%d bytes) failed!\n",
- card->txbd_ring_size);
+ mwifiex_dbg(adapter, ERROR,
+ "allocate consistent memory (%d bytes) failed!\n",
+ card->txbd_ring_size);
return -ENOMEM;
}
- dev_dbg(adapter->dev,
- "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
- card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
- (u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size);
+ mwifiex_dbg(adapter, DATA,
+ "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
+ card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
+ (u32)((u64)card->txbd_ring_pbase >> 32),
+ card->txbd_ring_size);
return mwifiex_init_txq_ring(adapter);
}
@@ -777,23 +787,24 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
MWIFIEX_MAX_TXRX_BD;
- dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n",
- card->rxbd_ring_size);
+ mwifiex_dbg(adapter, INFO,
+ "info: rxbd_ring: Allocating %d bytes\n",
+ card->rxbd_ring_size);
card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
card->rxbd_ring_size,
&card->rxbd_ring_pbase);
if (!card->rxbd_ring_vbase) {
- dev_err(adapter->dev,
- "allocate consistent memory (%d bytes) failed!\n",
- card->rxbd_ring_size);
+ mwifiex_dbg(adapter, ERROR,
+ "allocate consistent memory (%d bytes) failed!\n",
+ card->rxbd_ring_size);
return -ENOMEM;
}
- dev_dbg(adapter->dev,
- "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
- card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
- (u32)((u64)card->rxbd_ring_pbase >> 32),
- card->rxbd_ring_size);
+ mwifiex_dbg(adapter, DATA,
+ "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
+ card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
+ (u32)((u64)card->rxbd_ring_pbase >> 32),
+ card->rxbd_ring_size);
return mwifiex_init_rxq_ring(adapter);
}
@@ -840,23 +851,24 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) *
MWIFIEX_MAX_EVT_BD;
- dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n",
+ mwifiex_dbg(adapter, INFO,
+ "info: evtbd_ring: Allocating %d bytes\n",
card->evtbd_ring_size);
card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
card->evtbd_ring_size,
&card->evtbd_ring_pbase);
if (!card->evtbd_ring_vbase) {
- dev_err(adapter->dev,
- "allocate consistent memory (%d bytes) failed!\n",
- card->evtbd_ring_size);
+ mwifiex_dbg(adapter, ERROR,
+ "allocate consistent memory (%d bytes) failed!\n",
+ card->evtbd_ring_size);
return -ENOMEM;
}
- dev_dbg(adapter->dev,
- "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
- card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
- (u32)((u64)card->evtbd_ring_pbase >> 32),
- card->evtbd_ring_size);
+ mwifiex_dbg(adapter, EVENT,
+ "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
+ card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
+ (u32)((u64)card->evtbd_ring_pbase >> 32),
+ card->evtbd_ring_size);
return mwifiex_pcie_init_evt_ring(adapter);
}
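
The txbd, rxbd, and evtbd creation paths converted above all follow one recipe: compute the descriptor-array size, allocate a coherent vbase/pbase pair, and log both 32-bit halves of the bus address at the new INFO/DATA/EVENT levels before populating the slots. A condensed kernel-style sketch of that recipe (in-tree code only; struct desc and ring_alloc() are illustrative, and dma_alloc_coherent() is the modern spelling of the pci_alloc_consistent() call used in the patch):

#include <linux/dma-mapping.h>

#define N_DESC 32

struct desc { __le64 paddr; __le16 len; __le16 flags; };

/* One coherent allocation yields the CPU virtual base plus the bus
 * address the firmware walks; both halves of the 64-bit bus address
 * are logged, matching the %#x:%x format above. */
static struct desc *ring_alloc(struct device *dev, dma_addr_t *pbase)
{
	size_t size = sizeof(struct desc) * N_DESC;
	struct desc *vbase;

	vbase = dma_alloc_coherent(dev, size, pbase, GFP_KERNEL);
	if (!vbase)
		return NULL;

	dev_dbg(dev, "ring base: %p, pbase: %#x:%x, len: %zx\n",
		vbase, (u32)*pbase, (u32)((u64)*pbase >> 32), size);
	return vbase;
}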
@@ -895,8 +907,8 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
/* Allocate memory for receiving command response data */
skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for command response data.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Unable to allocate skb for command response data.\n");
return -ENOMEM;
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
@@ -944,14 +956,16 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
&card->sleep_cookie_pbase);
if (!card->sleep_cookie_vbase) {
- dev_err(adapter->dev, "pci_alloc_consistent failed!\n");
+ mwifiex_dbg(adapter, ERROR,
+ "pci_alloc_consistent failed!\n");
return -ENOMEM;
}
/* Init val of Sleep Cookie */
*(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE;
- dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n",
- *((u32 *)card->sleep_cookie_vbase));
+ mwifiex_dbg(adapter, INFO,
+ "alloc_scook: sleep cookie=0x%x\n",
+ *((u32 *)card->sleep_cookie_vbase));
return 0;
}
@@ -993,8 +1007,8 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
*/
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DNLD_RDY)) {
- dev_err(adapter->dev,
- "failed to assert dnld-rdy interrupt.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "failed to assert dnld-rdy interrupt.\n");
return -1;
}
}
@@ -1018,13 +1032,14 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
/* Read the TX ring read pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
- dev_err(adapter->dev,
- "SEND COMP: failed to read reg->tx_rdptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SEND COMP: failed to read reg->tx_rdptr\n");
return -1;
}
- dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
- card->txbd_rdptr, rdptr);
+ mwifiex_dbg(adapter, DATA,
+ "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
+ card->txbd_rdptr, rdptr);
num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
/* free from previous txbd_rdptr to current txbd_rdptr */
@@ -1038,9 +1053,9 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
skb = card->tx_buf_list[wrdoneidx];
if (skb) {
- dev_dbg(adapter->dev,
- "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
- skb, wrdoneidx);
+ mwifiex_dbg(adapter, DATA,
+ "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
+ skb, wrdoneidx);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
@@ -1112,8 +1127,9 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
__le16 *tmp;
if (!(skb->data && skb->len)) {
- dev_err(adapter->dev, "%s(): invalid parameter <%p, %#x>\n",
- __func__, skb->data, skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "%s(): invalid parameter <%p, %#x>\n",
+ __func__, skb->data, skb->len);
return -1;
}
@@ -1121,7 +1137,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
mwifiex_pm_wakeup_card(adapter);
num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
- dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
+ mwifiex_dbg(adapter, DATA,
+ "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
card->txbd_rdptr, card->txbd_wrptr);
if (mwifiex_pcie_txbd_not_full(card)) {
u8 *payload;
@@ -1175,39 +1192,40 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
/* Write the TX ring write pointer in to reg->tx_wrptr */
if (mwifiex_write_reg(adapter, reg->tx_wrptr,
card->txbd_wrptr | rx_val)) {
- dev_err(adapter->dev,
- "SEND DATA: failed to write reg->tx_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SEND DATA: failed to write reg->tx_wrptr\n");
ret = -1;
goto done_unmap;
}
if ((mwifiex_pcie_txbd_not_full(card)) &&
tx_param->next_pkt_len) {
/* have more packets and TxBD still can hold more */
- dev_dbg(adapter->dev,
- "SEND DATA: delay dnld-rdy interrupt.\n");
+ mwifiex_dbg(adapter, DATA,
+ "SEND DATA: delay dnld-rdy interrupt.\n");
adapter->data_sent = false;
} else {
/* Send the TX ready interrupt */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DNLD_RDY)) {
- dev_err(adapter->dev,
- "SEND DATA: failed to assert dnld-rdy interrupt.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SEND DATA: failed to assert dnld-rdy interrupt.\n");
ret = -1;
goto done_unmap;
}
}
- dev_dbg(adapter->dev, "info: SEND DATA: Updated <Rd: %#x, Wr: "
- "%#x> and sent packet to firmware successfully\n",
- card->txbd_rdptr, card->txbd_wrptr);
+ mwifiex_dbg(adapter, DATA,
+ "info: SEND DATA: Updated <Rd: %#x, Wr:\t"
+ "%#x> and sent packet to firmware successfully\n",
+ card->txbd_rdptr, card->txbd_wrptr);
} else {
- dev_dbg(adapter->dev,
- "info: TX Ring full, can't send packets to fw\n");
+ mwifiex_dbg(adapter, DATA,
+ "info: TX Ring full, can't send packets to fw\n");
adapter->data_sent = true;
/* Send the TX ready interrupt */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DNLD_RDY))
- dev_err(adapter->dev,
- "SEND DATA: failed to assert door-bell intr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SEND DATA: failed to assert door-bell intr\n");
return -EBUSY;
}
@@ -1243,8 +1261,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
/* Read the RX ring Write pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
- dev_err(adapter->dev,
- "RECV DATA: failed to read reg->rx_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "RECV DATA: failed to read reg->rx_wrptr\n");
ret = -1;
goto done;
}
@@ -1277,15 +1295,15 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
rx_len = le16_to_cpu(pkt_len);
if (WARN_ON(rx_len <= INTF_HEADER_LEN ||
rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) {
- dev_err(adapter->dev,
- "Invalid RX len %d, Rd=%#x, Wr=%#x\n",
- rx_len, card->rxbd_rdptr, wrptr);
+ mwifiex_dbg(adapter, ERROR,
+ "Invalid RX len %d, Rd=%#x, Wr=%#x\n",
+ rx_len, card->rxbd_rdptr, wrptr);
dev_kfree_skb_any(skb_data);
} else {
skb_put(skb_data, rx_len);
- dev_dbg(adapter->dev,
- "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
- card->rxbd_rdptr, wrptr, rx_len);
+ mwifiex_dbg(adapter, DATA,
+ "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
+ card->rxbd_rdptr, wrptr, rx_len);
skb_pull(skb_data, INTF_HEADER_LEN);
if (adapter->rx_work_enabled) {
skb_queue_tail(&adapter->rx_data_q, skb_data);
@@ -1299,8 +1317,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
GFP_KERNEL | GFP_DMA);
if (!skb_tmp) {
- dev_err(adapter->dev,
- "Unable to allocate skb.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Unable to allocate skb.\n");
return -ENOMEM;
}
@@ -1311,9 +1329,9 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
- dev_dbg(adapter->dev,
- "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
- skb_tmp, rd_index);
+ mwifiex_dbg(adapter, INFO,
+ "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
+ skb_tmp, rd_index);
card->rx_buf_list[rd_index] = skb_tmp;
if (reg->pfu_enabled) {
@@ -1336,28 +1354,29 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
reg->rx_rollover_ind) ^
reg->rx_rollover_ind);
}
- dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
- card->rxbd_rdptr, wrptr);
+ mwifiex_dbg(adapter, DATA,
+ "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
+ card->rxbd_rdptr, wrptr);
tx_val = card->txbd_wrptr & reg->tx_wrap_mask;
/* Write the RX ring read pointer in to reg->rx_rdptr */
if (mwifiex_write_reg(adapter, reg->rx_rdptr,
card->rxbd_rdptr | tx_val)) {
- dev_err(adapter->dev,
- "RECV DATA: failed to write reg->rx_rdptr\n");
+ mwifiex_dbg(adapter, DATA,
+ "RECV DATA: failed to write reg->rx_rdptr\n");
ret = -1;
goto done;
}
/* Read the RX ring Write pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
- dev_err(adapter->dev,
- "RECV DATA: failed to read reg->rx_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "RECV DATA: failed to read reg->rx_wrptr\n");
ret = -1;
goto done;
}
- dev_dbg(adapter->dev,
- "info: RECV DATA: Rcvd packet from fw successfully\n");
+ mwifiex_dbg(adapter, DATA,
+ "info: RECV DATA: Rcvd packet from fw successfully\n");
card->rxbd_wrptr = wrptr;
}
@@ -1376,9 +1395,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (!(skb->data && skb->len)) {
- dev_err(adapter->dev,
- "Invalid parameter in %s <%p. len %d>\n",
- __func__, skb->data, skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "Invalid parameter in %s <%p. len %d>\n",
+ __func__, skb->data, skb->len);
return -1;
}
@@ -1391,9 +1410,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
* address scratch register
*/
if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) {
- dev_err(adapter->dev,
- "%s: failed to write download command to boot code.\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to write download command to boot code.\n",
+ __func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1403,18 +1422,18 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
*/
if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
(u32)((u64)buf_pa >> 32))) {
- dev_err(adapter->dev,
- "%s: failed to write download command to boot code.\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to write download command to boot code.\n",
+ __func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
/* Write the command length to cmd_size scratch register */
if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) {
- dev_err(adapter->dev,
- "%s: failed to write command len to cmd_size scratch reg\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to write command len to cmd_size scratch reg\n",
+ __func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1422,8 +1441,8 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Ring the door bell */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DOOR_BELL)) {
- dev_err(adapter->dev,
- "%s: failed to assert door-bell intr\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to assert door-bell intr\n", __func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1443,8 +1462,8 @@ static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter)
/* Write the RX ring read pointer in to reg->rx_rdptr */
if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr |
tx_wrap)) {
- dev_err(adapter->dev,
- "RECV DATA: failed to write reg->rx_rdptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "RECV DATA: failed to write reg->rx_rdptr\n");
return -1;
}
return 0;
@@ -1462,15 +1481,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
u8 *payload = (u8 *)skb->data;
if (!(skb->data && skb->len)) {
- dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n",
- __func__, skb->data, skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "Invalid parameter in %s <%p, %#x>\n",
+ __func__, skb->data, skb->len);
return -1;
}
/* Make sure a command response buffer is available */
if (!card->cmdrsp_buf) {
- dev_err(adapter->dev,
- "No response buffer available, send command failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "No response buffer available, send command failed\n");
return -EBUSY;
}
@@ -1503,8 +1523,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
(u32)cmdrsp_buf_pa)) {
- dev_err(adapter->dev,
- "Failed to write download cmd to boot code.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
@@ -1512,8 +1532,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi,
(u32)((u64)cmdrsp_buf_pa >> 32))) {
- dev_err(adapter->dev,
- "Failed to write download cmd to boot code.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
@@ -1523,16 +1543,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
(u32)cmd_buf_pa)) {
- dev_err(adapter->dev,
- "Failed to write download cmd to boot code.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
/* Write the upper 32bits of the physical address to reg->cmd_addr_hi */
if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
(u32)((u64)cmd_buf_pa >> 32))) {
- dev_err(adapter->dev,
- "Failed to write download cmd to boot code.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
@@ -1540,8 +1560,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Write the command length to reg->cmd_size */
if (mwifiex_write_reg(adapter, reg->cmd_size,
card->cmd_buf->len)) {
- dev_err(adapter->dev,
- "Failed to write cmd len to reg->cmd_size\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write cmd len to reg->cmd_size\n");
ret = -1;
goto done;
}
@@ -1549,8 +1569,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Ring the door bell */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DOOR_BELL)) {
- dev_err(adapter->dev,
- "Failed to assert door-bell intr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to assert door-bell intr\n");
ret = -1;
goto done;
}
@@ -1574,7 +1594,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
u16 rx_len;
__le16 pkt_len;
- dev_dbg(adapter->dev, "info: Rx CMD Response\n");
+ mwifiex_dbg(adapter, CMD,
+ "info: Rx CMD Response\n");
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
@@ -1598,8 +1619,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (mwifiex_write_reg(adapter,
PCIE_CPU_INT_EVENT,
CPU_INTR_SLEEP_CFM_DONE)) {
- dev_warn(adapter->dev,
- "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return -1;
}
mwifiex_delay_for_sleep_cookie(adapter,
@@ -1608,8 +1629,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
} else {
- dev_err(adapter->dev,
- "There is no command but got cmdrsp\n");
+ mwifiex_dbg(adapter, ERROR,
+ "There is no command but got cmdrsp\n");
}
memcpy(adapter->upld_buf, skb->data,
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
@@ -1628,15 +1649,15 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
will prevent firmware from writing to the same response
buffer again. */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) {
- dev_err(adapter->dev,
- "cmd_done: failed to clear cmd_rsp_addr_lo\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cmd_done: failed to clear cmd_rsp_addr_lo\n");
return -1;
}
/* Write the upper 32bits of the cmdrsp buffer physical
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) {
- dev_err(adapter->dev,
- "cmd_done: failed to clear cmd_rsp_addr_hi\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cmd_done: failed to clear cmd_rsp_addr_hi\n");
return -1;
}
}
@@ -1678,25 +1699,28 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
mwifiex_pm_wakeup_card(adapter);
if (adapter->event_received) {
- dev_dbg(adapter->dev, "info: Event being processed, "
- "do not process this interrupt just yet\n");
+ mwifiex_dbg(adapter, EVENT,
+ "info: Event being processed,\t"
+ "do not process this interrupt just yet\n");
return 0;
}
if (rdptr >= MWIFIEX_MAX_EVT_BD) {
- dev_dbg(adapter->dev, "info: Invalid read pointer...\n");
+ mwifiex_dbg(adapter, ERROR,
+ "info: Invalid read pointer...\n");
return -1;
}
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
- dev_err(adapter->dev,
- "EventReady: failed to read reg->evt_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EventReady: failed to read reg->evt_wrptr\n");
return -1;
}
- dev_dbg(adapter->dev, "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
- card->evtbd_rdptr, wrptr);
+ mwifiex_dbg(adapter, EVENT,
+ "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
+ card->evtbd_rdptr, wrptr);
if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr
& MWIFIEX_EVTBD_MASK)) ||
((wrptr & reg->evt_rollover_ind) ==
@@ -1705,7 +1729,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
__le16 data_len = 0;
u16 evt_len;
- dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
+ mwifiex_dbg(adapter, INFO,
+ "info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
@@ -1721,9 +1746,10 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
len is 2 bytes followed by type which is 2 bytes */
memcpy(&data_len, skb_cmd->data, sizeof(__le16));
evt_len = le16_to_cpu(data_len);
-
+ skb_trim(skb_cmd, evt_len);
skb_pull(skb_cmd, INTF_HEADER_LEN);
- dev_dbg(adapter->dev, "info: Event length: %d\n", evt_len);
+ mwifiex_dbg(adapter, EVENT,
+ "info: Event length: %d\n", evt_len);
if ((evt_len > 0) && (evt_len < MAX_EVENT_SIZE))
memcpy(adapter->event_body, skb_cmd->data +
@@ -1740,8 +1766,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
} else {
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_EVENT_DONE)) {
- dev_warn(adapter->dev,
- "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return -1;
}
}
@@ -1766,15 +1792,16 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
return 0;
if (rdptr >= MWIFIEX_MAX_EVT_BD) {
- dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
- rdptr);
+ mwifiex_dbg(adapter, ERROR,
+ "event_complete: Invalid rdptr 0x%x\n",
+ rdptr);
return -EINVAL;
}
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
- dev_err(adapter->dev,
- "event_complete: failed to read reg->evt_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "event_complete: failed to read reg->evt_wrptr\n");
return -1;
}
@@ -1791,9 +1818,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
desc->flags = 0;
skb = NULL;
} else {
- dev_dbg(adapter->dev,
- "info: ERROR: buf still valid at index %d, <%p, %p>\n",
- rdptr, card->evt_buf_list[rdptr], skb);
+ mwifiex_dbg(adapter, ERROR,
+ "info: ERROR: buf still valid at index %d, <%p, %p>\n",
+ rdptr, card->evt_buf_list[rdptr], skb);
}
if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
@@ -1802,18 +1829,20 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
reg->evt_rollover_ind);
}
- dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>",
- card->evtbd_rdptr, wrptr);
+ mwifiex_dbg(adapter, EVENT,
+ "info: Updated <Rd: 0x%x, Wr: 0x%x>",
+ card->evtbd_rdptr, wrptr);
/* Write the event ring read pointer in to reg->evt_rdptr */
if (mwifiex_write_reg(adapter, reg->evt_rdptr,
card->evtbd_rdptr)) {
- dev_err(adapter->dev,
- "event_complete: failed to read reg->evt_rdptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "event_complete: failed to read reg->evt_rdptr\n");
return -1;
}
- dev_dbg(adapter->dev, "info: Check Events Again\n");
+ mwifiex_dbg(adapter, EVENT,
+ "info: Check Events Again\n");
ret = mwifiex_pcie_process_event_ready(adapter);
return ret;
@@ -1840,17 +1869,18 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (!firmware || !firmware_len) {
- dev_err(adapter->dev,
- "No firmware image found! Terminating download\n");
+ mwifiex_dbg(adapter, ERROR,
+ "No firmware image found! Terminating download\n");
return -1;
}
- dev_dbg(adapter->dev, "info: Downloading FW image (%d bytes)\n",
- firmware_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: Downloading FW image (%d bytes)\n",
+ firmware_len);
if (mwifiex_pcie_disable_host_int(adapter)) {
- dev_err(adapter->dev,
- "%s: Disabling interrupts failed.\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Disabling interrupts failed.\n", __func__);
return -1;
}
@@ -1872,8 +1902,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = mwifiex_read_reg(adapter, reg->cmd_size,
&len);
if (ret) {
- dev_warn(adapter->dev,
- "Failed reading len from boot code\n");
+ mwifiex_dbg(adapter, FATAL,
+ "Failed reading len from boot code\n");
goto done;
}
if (len)
@@ -1884,8 +1914,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (!len) {
break;
} else if (len > MWIFIEX_UPLD_SIZE) {
- pr_err("FW download failure @ %d, invalid length %d\n",
- offset, len);
+ mwifiex_dbg(adapter, ERROR,
+ "FW download failure @ %d, invalid length %d\n",
+ offset, len);
ret = -1;
goto done;
}
@@ -1895,14 +1926,16 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (len & BIT(0)) {
block_retry_cnt++;
if (block_retry_cnt > MAX_WRITE_IOMEM_RETRY) {
- pr_err("FW download failure @ %d, over max "
- "retry count\n", offset);
+ mwifiex_dbg(adapter, ERROR,
+ "FW download failure @ %d, over max\t"
+ "retry count\n", offset);
ret = -1;
goto done;
}
- dev_err(adapter->dev, "FW CRC error indicated by the "
- "helper: len = 0x%04X, txlen = %d\n",
- len, txlen);
+ mwifiex_dbg(adapter, ERROR,
+ "FW CRC error indicated by the\t"
+ "helper: len = 0x%04X, txlen = %d\n",
+ len, txlen);
len &= ~BIT(0);
/* Setting this to 0 to resend from same offset */
txlen = 0;
@@ -1913,7 +1946,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (firmware_len - offset < txlen)
txlen = firmware_len - offset;
- dev_dbg(adapter->dev, ".");
+ mwifiex_dbg(adapter, INFO, ".");
tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
card->pcie.blksz_fw_dl;
@@ -1927,8 +1960,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* Send the boot command to device */
if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
- dev_err(adapter->dev,
- "Failed to send firmware download command\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to send firmware download command\n");
ret = -1;
goto done;
}
@@ -1937,9 +1970,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
do {
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
&ireg_intr)) {
- dev_err(adapter->dev, "%s: Failed to read "
- "interrupt status during fw dnld.\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Failed to read\t"
+ "interrupt status during fw dnld.\n",
+ __func__);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
ret = -1;
@@ -1953,8 +1987,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
offset += txlen;
} while (true);
- dev_notice(adapter->dev,
- "info: FW download over, size %d bytes\n", offset);
+ mwifiex_dbg(adapter, MSG,
+ "info: FW download over, size %d bytes\n", offset);
ret = 0;
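
In the download loop above, BIT(0) of the length word read back from the card flags a CRC failure on the previous block: the driver masks the bit off, resends from the same offset, and aborts once block_retry_cnt exceeds MAX_WRITE_IOMEM_RETRY. A runnable reduction of just that retry accounting (card I/O omitted; next_txlen() is illustrative, and the counter accumulates across the whole download like block_retry_cnt):

#include <stdint.h>
#include <stdio.h>

#define MAX_WRITE_IOMEM_RETRY 2

/* Returns the byte count requested by the helper, 0 to resend the
 * previous block from the same offset, or -1 after too many CRC
 * retries. 'len' is the word read back from the cmd_size register. */
static int next_txlen(uint32_t len, uint32_t offset, int *retry_cnt)
{
	if (len & 0x1u) {	/* BIT(0): CRC error on the last block */
		if (++(*retry_cnt) > MAX_WRITE_IOMEM_RETRY) {
			fprintf(stderr,
				"FW download failure @ %u, over max retry count\n",
				offset);
			return -1;
		}
		return 0;	/* resend from the same offset */
	}
	return (int)len;	/* next block size requested by the helper */
}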
@@ -1980,15 +2014,17 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
/* Mask spurious interrupts */
if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK,
HOST_INTR_MASK)) {
- dev_warn(adapter->dev, "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return -1;
}
- dev_dbg(adapter->dev, "Setting driver ready signature\n");
+ mwifiex_dbg(adapter, INFO,
+ "Setting driver ready signature\n");
if (mwifiex_write_reg(adapter, reg->drv_rdy,
FIRMWARE_READY_PCIE)) {
- dev_err(adapter->dev,
- "Failed to write driver ready signature\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write driver ready signature\n");
return -1;
}
@@ -2015,12 +2051,13 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
&winner_status))
ret = -1;
else if (!winner_status) {
- dev_err(adapter->dev, "PCI-E is the winner\n");
+ mwifiex_dbg(adapter, INFO,
+ "PCI-E is the winner\n");
adapter->winner = 1;
} else {
- dev_err(adapter->dev,
- "PCI-E is not the winner <%#x,%d>, exit dnld\n",
- ret, adapter->winner);
+ mwifiex_dbg(adapter, ERROR,
+ "PCI-E is not the winner <%#x,%d>, exit dnld\n",
+ ret, adapter->winner);
}
}
@@ -2039,7 +2076,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
return;
if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) {
- dev_warn(adapter->dev, "Read register failed\n");
+ mwifiex_dbg(adapter, ERROR, "Read register failed\n");
return;
}
@@ -2050,7 +2087,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
/* Clear the pending interrupts */
if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS,
~pcie_ireg)) {
- dev_warn(adapter->dev, "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return;
}
spin_lock_irqsave(&adapter->int_lock, flags);
@@ -2133,21 +2171,24 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
while (pcie_ireg & HOST_INTR_MASK) {
if (pcie_ireg & HOST_INTR_DNLD_DONE) {
pcie_ireg &= ~HOST_INTR_DNLD_DONE;
- dev_dbg(adapter->dev, "info: TX DNLD Done\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: TX DNLD Done\n");
ret = mwifiex_pcie_send_data_complete(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_UPLD_RDY) {
pcie_ireg &= ~HOST_INTR_UPLD_RDY;
- dev_dbg(adapter->dev, "info: Rx DATA\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: Rx DATA\n");
ret = mwifiex_pcie_process_recv_data(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_EVENT_RDY) {
pcie_ireg &= ~HOST_INTR_EVENT_RDY;
- dev_dbg(adapter->dev, "info: Rx EVENT\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: Rx EVENT\n");
ret = mwifiex_pcie_process_event_ready(adapter);
if (ret)
return ret;
@@ -2156,8 +2197,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (pcie_ireg & HOST_INTR_CMD_DONE) {
pcie_ireg &= ~HOST_INTR_CMD_DONE;
if (adapter->cmd_sent) {
- dev_dbg(adapter->dev,
- "info: CMD sent Interrupt\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: CMD sent Interrupt\n");
adapter->cmd_sent = false;
}
/* Handle command response */
@@ -2169,8 +2210,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (mwifiex_pcie_ok_to_access_hw(adapter)) {
if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
&pcie_ireg)) {
- dev_warn(adapter->dev,
- "Read register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Read register failed\n");
return -1;
}
@@ -2178,16 +2219,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (mwifiex_write_reg(adapter,
PCIE_HOST_INT_STATUS,
~pcie_ireg)) {
- dev_warn(adapter->dev,
- "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return -1;
}
}
}
}
- dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
- adapter->cmd_sent, adapter->data_sent);
+ mwifiex_dbg(adapter, INTR,
+ "info: cmd_sent=%d data_sent=%d\n",
+ adapter->cmd_sent, adapter->data_sent);
if (adapter->ps_state != PS_STATE_SLEEP)
mwifiex_pcie_enable_host_int(adapter);
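
The loop above services every latched cause from a single read of PCIE_HOST_INT_STATUS, clearing each bit in the local copy as its handler runs, so TX-done, RX data, events, and command completion can all be handled in one pass at the new INTR level. A runnable reduction of that clear-and-dispatch shape (bit values hypothetical):

#include <stdint.h>
#include <stdio.h>

#define INTR_DNLD_DONE 0x01u
#define INTR_UPLD_RDY  0x02u
#define INTR_EVENT_RDY 0x04u
#define INTR_CMD_DONE  0x08u
#define INTR_MASK (INTR_DNLD_DONE | INTR_UPLD_RDY | \
		   INTR_EVENT_RDY | INTR_CMD_DONE)

/* Clear-and-dispatch in the style of mwifiex_process_int_status():
 * each cause bit is removed from the local copy once handled. */
static void dispatch(uint32_t ireg)
{
	while (ireg & INTR_MASK) {
		if (ireg & INTR_DNLD_DONE) {
			ireg &= ~INTR_DNLD_DONE;
			puts("info: TX DNLD Done");
		}
		if (ireg & INTR_UPLD_RDY) {
			ireg &= ~INTR_UPLD_RDY;
			puts("info: Rx DATA");
		}
		if (ireg & INTR_EVENT_RDY) {
			ireg &= ~INTR_EVENT_RDY;
			puts("info: Rx EVENT");
		}
		if (ireg & INTR_CMD_DONE) {
			ireg &= ~INTR_CMD_DONE;
			puts("info: CMD done");
		}
	}
}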
@@ -2209,7 +2251,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
struct mwifiex_tx_param *tx_param)
{
if (!skb) {
- dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "Passed NULL skb to %s\n", __func__);
return -1;
}
@@ -2232,7 +2275,8 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY);
if (ret) {
- dev_err(adapter->dev, "PCIE write err\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PCIE write err\n");
return RDWR_STATUS_FAILURE;
}
@@ -2243,24 +2287,25 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
if (doneflag && ctrl_data == doneflag)
return RDWR_STATUS_DONE;
if (ctrl_data != FW_DUMP_HOST_READY) {
- dev_info(adapter->dev,
- "The ctrl reg was changed, re-try again!\n");
+ mwifiex_dbg(adapter, WARN,
+ "The ctrl reg was changed, re-try again!\n");
ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
FW_DUMP_HOST_READY);
if (ret) {
- dev_err(adapter->dev, "PCIE write err\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PCIE write err\n");
return RDWR_STATUS_FAILURE;
}
}
usleep_range(100, 200);
}
- dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+ mwifiex_dbg(adapter, ERROR, "Fail to pull ctrl_data\n");
return RDWR_STATUS_FAILURE;
}
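
mwifiex_pcie_rdwr_firmware() above is a small control-register handshake: assert FW_DUMP_HOST_READY, then poll; reading back the table's done flag means the current memory region is exhausted, while any other unexpected value gets HOST_READY reasserted before the next poll. A self-contained reduction of the discipline visible in this hunk (the in-tree function also returns a per-round success status once data is staged, omitted here; the register is stubbed with a static variable so the sketch compiles standalone):

#include <stdint.h>
#include <unistd.h>

enum rdwr_status { RDWR_DONE, RDWR_FAILURE };

#define FW_DUMP_HOST_READY 0xEEu	/* assumed value */
#define POLL_LIMIT 400

/* Trivial stubs standing in for mwifiex_read_reg_byte() /
 * mwifiex_write_reg() on reg->fw_dump_ctrl. */
static uint8_t ctrl_reg;
static uint8_t read_ctrl_reg(void) { return ctrl_reg; }
static void write_ctrl_reg(uint8_t v) { ctrl_reg = v; }

static enum rdwr_status rdwr_firmware(uint8_t doneflag)
{
	int tries;
	uint8_t ctrl;

	write_ctrl_reg(FW_DUMP_HOST_READY);
	for (tries = 0; tries < POLL_LIMIT; tries++) {
		ctrl = read_ctrl_reg();
		if (doneflag && ctrl == doneflag)
			return RDWR_DONE;	/* region fully dumped */
		if (ctrl != FW_DUMP_HOST_READY)
			/* ctrl reg was changed: reassert and re-try */
			write_ctrl_reg(FW_DUMP_HOST_READY);
		usleep(150);	/* usleep_range(100, 200) in the driver */
	}
	return RDWR_FAILURE;	/* "Fail to pull ctrl_data" */
}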
/* This function dumps firmware memory to a file */
-static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
+static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
@@ -2269,7 +2314,6 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
enum rdwr_status stat;
u32 memory_size;
int ret;
- static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL };
if (!card->pcie.can_dump_fw)
return;
@@ -2284,12 +2328,12 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
entry->mem_size = 0;
}
- dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
+ mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n");
/* Read the number of the memories which will dump */
stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
if (stat == RDWR_STATUS_FAILURE)
- goto done;
+ return;
reg = creg->fw_dump_start;
mwifiex_read_reg_byte(adapter, reg, &dump_num);
@@ -2300,7 +2344,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
if (stat == RDWR_STATUS_FAILURE)
- goto done;
+ return;
memory_size = 0;
reg = creg->fw_dump_start;
@@ -2311,36 +2355,36 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
}
if (memory_size == 0) {
- dev_info(adapter->dev, "Firmware dump Finished!\n");
+ mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n");
ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl,
FW_DUMP_READ_DONE);
if (ret) {
- dev_err(adapter->dev, "PCIE write err\n");
- goto done;
+ mwifiex_dbg(adapter, ERROR, "PCIE write err\n");
+ return;
}
break;
}
- dev_info(adapter->dev,
- "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+ mwifiex_dbg(adapter, DUMP,
+ "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
entry->mem_ptr = vmalloc(memory_size + 1);
entry->mem_size = memory_size;
if (!entry->mem_ptr) {
- dev_err(adapter->dev,
- "Vmalloc %s failed\n", entry->mem_name);
- goto done;
+ mwifiex_dbg(adapter, ERROR,
+ "Vmalloc %s failed\n", entry->mem_name);
+ return;
}
dbg_ptr = entry->mem_ptr;
end_ptr = dbg_ptr + memory_size;
doneflag = entry->done_flag;
- dev_info(adapter->dev, "Start %s output, please wait...\n",
- entry->mem_name);
+ mwifiex_dbg(adapter, DUMP, "Start %s output, please wait...\n",
+ entry->mem_name);
do {
stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
if (RDWR_STATUS_FAILURE == stat)
- goto done;
+ return;
reg_start = creg->fw_dump_start;
reg_end = creg->fw_dump_end;
@@ -2349,46 +2393,49 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
if (dbg_ptr < end_ptr) {
dbg_ptr++;
} else {
- dev_err(adapter->dev,
- "Allocated buf not enough\n");
- goto done;
+ mwifiex_dbg(adapter, ERROR,
+ "Allocated buf not enough\n");
+ return;
}
}
if (stat != RDWR_STATUS_DONE)
continue;
- dev_info(adapter->dev, "%s done: size=0x%tx\n",
- entry->mem_name, dbg_ptr - entry->mem_ptr);
+ mwifiex_dbg(adapter, DUMP,
+ "%s done: size=0x%tx\n",
+ entry->mem_name, dbg_ptr - entry->mem_ptr);
break;
} while (true);
}
- dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
-
- kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
+ mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n");
+}
-done:
- adapter->curr_mem_idx = 0;
+static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
+{
+ mwifiex_drv_info_dump(adapter);
+ mwifiex_pcie_fw_dump(adapter);
+ mwifiex_upload_device_dump(adapter);
}
static unsigned long iface_work_flags;
static struct mwifiex_adapter *save_adapter;
static void mwifiex_pcie_work(struct work_struct *work)
{
- if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+ if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
&iface_work_flags))
- mwifiex_pcie_fw_dump_work(save_adapter);
+ mwifiex_pcie_device_dump_work(save_adapter);
}
static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
/* This function dumps FW information */
-static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
+static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
{
save_adapter = adapter;
- if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
+ if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+ set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
schedule_work(&pcie_work);
}
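
Note the split above: the .device_dump entry point only records the request (test_bit() to coalesce repeats, set_bit(), schedule_work()), and the worker claims the flag with test_and_clear_bit() before running the slow drv-info / firmware / upload sequence in process context. A runnable reduction of that debounce, with a C11 atomic flag standing in for the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag dump_pending = ATOMIC_FLAG_INIT;

/* IRQ-side: cheap and idempotent, like mwifiex_pcie_device_dump(). */
static void request_dump(void)
{
	if (atomic_flag_test_and_set(&dump_pending))
		return;		/* already queued: coalesced */
	printf("work scheduled\n");
}

/* Worker-side: claim the flag, then do the slow three-stage dump,
 * like mwifiex_pcie_device_dump_work(). */
static void work_fn(void)
{
	atomic_flag_clear(&dump_pending);
	printf("drv info dump -> fw dump -> devcoredump upload\n");
}

int main(void)
{
	request_dump();
	request_dump();		/* coalesced with the first request */
	work_fn();
	return 0;
}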
@@ -2418,45 +2465,50 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
pci_set_master(pdev);
- dev_dbg(adapter->dev, "try set_consistent_dma_mask(32)\n");
+ mwifiex_dbg(adapter, INFO,
+ "try set_consistent_dma_mask(32)\n");
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- dev_err(adapter->dev, "set_dma_mask(32) failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "set_dma_mask(32) failed\n");
goto err_set_dma_mask;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- dev_err(adapter->dev, "set_consistent_dma_mask(64) failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "set_consistent_dma_mask(64) failed\n");
goto err_set_dma_mask;
}
ret = pci_request_region(pdev, 0, DRV_NAME);
if (ret) {
- dev_err(adapter->dev, "req_reg(0) error\n");
+ mwifiex_dbg(adapter, ERROR,
+ "req_reg(0) error\n");
goto err_req_region0;
}
card->pci_mmap = pci_iomap(pdev, 0, 0);
if (!card->pci_mmap) {
- dev_err(adapter->dev, "iomap(0) error\n");
+ mwifiex_dbg(adapter, ERROR, "iomap(0) error\n");
ret = -EIO;
goto err_iomap0;
}
ret = pci_request_region(pdev, 2, DRV_NAME);
if (ret) {
- dev_err(adapter->dev, "req_reg(2) error\n");
+ mwifiex_dbg(adapter, ERROR, "req_reg(2) error\n");
goto err_req_region2;
}
card->pci_mmap1 = pci_iomap(pdev, 2, 0);
if (!card->pci_mmap1) {
- dev_err(adapter->dev, "iomap(2) error\n");
+ mwifiex_dbg(adapter, ERROR,
+ "iomap(2) error\n");
ret = -EIO;
goto err_iomap2;
}
- dev_dbg(adapter->dev,
- "PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
- card->pci_mmap, card->pci_mmap1);
+ mwifiex_dbg(adapter, INFO,
+ "PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
+ card->pci_mmap, card->pci_mmap1);
card->cmdrsp_buf = NULL;
ret = mwifiex_pcie_create_txbd_ring(adapter);
@@ -2521,10 +2573,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (user_rmmod) {
- dev_dbg(adapter->dev, "Clearing driver ready signature\n");
+ mwifiex_dbg(adapter, INFO,
+ "Clearing driver ready signature\n");
if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
- dev_err(adapter->dev,
- "Failed to write driver not-ready signature\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write driver not-ready signature\n");
}
if (pdev) {
@@ -2555,7 +2608,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED,
"MRVL_PCIE", pdev);
if (ret) {
- pr_err("request_irq failed: ret=%d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "request_irq failed: ret=%d\n", ret);
adapter->card = NULL;
return -1;
}
@@ -2582,7 +2636,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
const struct mwifiex_pcie_card_reg *reg;
if (card) {
- dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "%s(): calling free_irq()\n", __func__);
free_irq(card->dev->irq, card->dev);
reg = card->pcie.reg;
@@ -2617,7 +2672,7 @@ static struct mwifiex_if_ops pcie_ops = {
.cleanup_mpa_buf = NULL,
.init_fw_port = mwifiex_pcie_init_fw_port,
.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
- .fw_dump = mwifiex_pcie_fw_dump,
+ .device_dump = mwifiex_pcie_device_dump,
};
/*
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 0ffdb7c5afd2..baf9715ddc10 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -241,20 +241,21 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv,
* LinkSys WRT54G && bss_desc->privacy
*/
) {
- dev_dbg(priv->adapter->dev, "info: %s: WPA:"
- " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
- "EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*(bss_desc->bcn_rsn_ie)).
- ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: WPA:\t"
+ "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
+ "EncMode=%#x privacy=%#x\n", __func__,
+ (bss_desc->bcn_wpa_ie) ?
+ (*bss_desc->bcn_wpa_ie).
+ vend_hdr.element_id : 0,
+ (bss_desc->bcn_rsn_ie) ?
+ (*bss_desc->bcn_rsn_ie).
+ ieee_hdr.element_id : 0,
+ (priv->sec_info.wep_enabled) ? "e" : "d",
+ (priv->sec_info.wpa_enabled) ? "e" : "d",
+ (priv->sec_info.wpa2_enabled) ? "e" : "d",
+ priv->sec_info.encryption_mode,
+ bss_desc->privacy);
return true;
}
return false;
@@ -277,20 +278,21 @@ mwifiex_is_bss_wpa2(struct mwifiex_private *priv,
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
*/
- dev_dbg(priv->adapter->dev, "info: %s: WPA2: "
- " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
- "EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*(bss_desc->bcn_rsn_ie)).
- ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: WPA2:\t"
+ "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
+ "EncMode=%#x privacy=%#x\n", __func__,
+ (bss_desc->bcn_wpa_ie) ?
+ (*bss_desc->bcn_wpa_ie).
+ vend_hdr.element_id : 0,
+ (bss_desc->bcn_rsn_ie) ?
+ (*bss_desc->bcn_rsn_ie).
+ ieee_hdr.element_id : 0,
+ (priv->sec_info.wep_enabled) ? "e" : "d",
+ (priv->sec_info.wpa_enabled) ? "e" : "d",
+ (priv->sec_info.wpa2_enabled) ? "e" : "d",
+ priv->sec_info.encryption_mode,
+ bss_desc->privacy);
return true;
}
return false;
@@ -333,18 +335,19 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
((!bss_desc->bcn_rsn_ie) ||
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
priv->sec_info.encryption_mode && bss_desc->privacy) {
- dev_dbg(priv->adapter->dev, "info: %s: dynamic "
- "WEP: wpa_ie=%#x wpa2_ie=%#x "
- "EncMode=%#x privacy=%#x\n",
- __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*(bss_desc->bcn_rsn_ie)).
- ieee_hdr.element_id : 0,
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: dynamic\t"
+ "WEP: wpa_ie=%#x wpa2_ie=%#x\t"
+ "EncMode=%#x privacy=%#x\n",
+ __func__,
+ (bss_desc->bcn_wpa_ie) ?
+ (*bss_desc->bcn_wpa_ie).
+ vend_hdr.element_id : 0,
+ (bss_desc->bcn_rsn_ie) ?
+ (*bss_desc->bcn_rsn_ie).
+ ieee_hdr.element_id : 0,
+ priv->sec_info.encryption_mode,
+ bss_desc->privacy);
return true;
}
return false;
@@ -383,19 +386,20 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
return 0;
if (priv->wps.session_enable) {
- dev_dbg(adapter->dev,
- "info: return success directly in WPS period\n");
+ mwifiex_dbg(adapter, IOCTL,
+ "info: return success directly in WPS period\n");
return 0;
}
if (bss_desc->chan_sw_ie_present) {
- dev_err(adapter->dev,
- "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
+ mwifiex_dbg(adapter, INFO,
+ "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
return -1;
}
if (mwifiex_is_bss_wapi(priv, bss_desc)) {
- dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: return success for WAPI AP\n");
return 0;
}
@@ -405,7 +409,8 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
return 0;
} else if (mwifiex_is_bss_static_wep(priv, bss_desc)) {
/* Static WEP enabled */
- dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Disable 11n in WEP mode.\n");
bss_desc->disable_11n = true;
return 0;
} else if (mwifiex_is_bss_wpa(priv, bss_desc)) {
@@ -418,9 +423,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
if (mwifiex_is_wpa_oui_present
(bss_desc, CIPHER_SUITE_TKIP)) {
- dev_dbg(adapter->dev,
- "info: Disable 11n if AES "
- "is not supported by AP\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Disable 11n if AES\t"
+ "is not supported by AP\n");
bss_desc->disable_11n = true;
} else {
return -1;
@@ -437,9 +442,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
if (mwifiex_is_rsn_oui_present
(bss_desc, CIPHER_SUITE_TKIP)) {
- dev_dbg(adapter->dev,
- "info: Disable 11n if AES "
- "is not supported by AP\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Disable 11n if AES\t"
+ "is not supported by AP\n");
bss_desc->disable_11n = true;
} else {
return -1;
@@ -455,17 +460,18 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
}
/* Security doesn't match */
- dev_dbg(adapter->dev,
- "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s "
- "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode, bss_desc->privacy);
+ mwifiex_dbg(adapter, ERROR,
+ "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t"
+ "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n",
+ __func__,
+ (bss_desc->bcn_wpa_ie) ?
+ (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0,
+ (bss_desc->bcn_rsn_ie) ?
+ (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0,
+ (priv->sec_info.wep_enabled) ? "e" : "d",
+ (priv->sec_info.wpa_enabled) ? "e" : "d",
+ (priv->sec_info.wpa2_enabled) ? "e" : "d",
+ priv->sec_info.encryption_mode, bss_desc->privacy);
return -1;
}
@@ -560,7 +566,8 @@ mwifiex_append_rate_tlv(struct mwifiex_private *priv,
else
rates_size = mwifiex_get_supported_rates(priv, rates);
- dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n",
+ mwifiex_dbg(priv->adapter, CMD,
+ "info: SCAN_CMD: Rates size = %d\n",
rates_size);
rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos;
rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
@@ -600,9 +607,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
u8 radio_type;
if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
- dev_dbg(priv->adapter->dev,
- "info: Scan: Null detect: %p, %p, %p\n",
- scan_cfg_out, chan_tlv_out, scan_chan_list);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: Scan: Null detect: %p, %p, %p\n",
+ scan_cfg_out, chan_tlv_out, scan_chan_list);
return -1;
}
@@ -645,16 +652,16 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
}
radio_type = tmp_chan_list->radio_type;
- dev_dbg(priv->adapter->dev,
- "info: Scan: Chan(%3d), Radio(%d),"
- " Mode(%d, %d), Dur(%d)\n",
- tmp_chan_list->chan_number,
- tmp_chan_list->radio_type,
- tmp_chan_list->chan_scan_mode_bitmap
- & MWIFIEX_PASSIVE_SCAN,
- (tmp_chan_list->chan_scan_mode_bitmap
- & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
- le16_to_cpu(tmp_chan_list->max_scan_time));
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Scan: Chan(%3d), Radio(%d),\t"
+ "Mode(%d, %d), Dur(%d)\n",
+ tmp_chan_list->chan_number,
+ tmp_chan_list->radio_type,
+ tmp_chan_list->chan_scan_mode_bitmap
+ & MWIFIEX_PASSIVE_SCAN,
+ (tmp_chan_list->chan_scan_mode_bitmap
+ & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
+ le16_to_cpu(tmp_chan_list->max_scan_time));
/* Copy the current channel TLV to the command being
prepared */
@@ -718,9 +725,11 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* The total scan time should be less than scan command timeout
value */
if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) {
- dev_err(priv->adapter->dev, "total scan time %dms"
- " is over limit (%dms), scan skipped\n",
- total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "total scan time %dms\t"
+ "is over limit (%dms), scan skipped\n",
+ total_scan_time,
+ MWIFIEX_MAX_TOTAL_SCAN_TIME);
ret = -1;
break;
}
@@ -905,9 +914,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
tlv_pos += (sizeof(wildcard_ssid_tlv->header)
+ le16_to_cpu(wildcard_ssid_tlv->header.len));
- dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n",
- i, wildcard_ssid_tlv->ssid,
- wildcard_ssid_tlv->max_ssid_length);
+ mwifiex_dbg(adapter, INFO,
+ "info: scan: ssid[%d]: %s, %d\n",
+ i, wildcard_ssid_tlv->ssid,
+ wildcard_ssid_tlv->max_ssid_length);
/* Empty wildcard ssid with a maxlen will match many or
potentially all SSIDs (maxlen == 32), therefore do
@@ -928,8 +938,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
*filtered_scan = true;
if (user_scan_in->scan_chan_gap) {
- dev_dbg(adapter->dev, "info: scan: channel gap = %d\n",
- user_scan_in->scan_chan_gap);
+ mwifiex_dbg(adapter, INFO,
+ "info: scan: channel gap = %d\n",
+ user_scan_in->scan_chan_gap);
*max_chan_per_scan =
MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
@@ -961,8 +972,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
add tlv */
if (num_probes) {
- dev_dbg(adapter->dev, "info: scan: num_probes = %d\n",
- num_probes);
+ mwifiex_dbg(adapter, INFO,
+ "info: scan: num_probes = %d\n",
+ num_probes);
num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos;
num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
@@ -1003,7 +1015,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
if (user_scan_in && user_scan_in->chan_list[0].chan_number) {
- dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Scan: Using supplied channel list\n");
for (chan_idx = 0;
chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX &&
@@ -1056,13 +1069,13 @@ mwifiex_config_scan(struct mwifiex_private *priv,
(user_scan_in->chan_list[0].chan_number ==
priv->curr_bss_params.bss_descriptor.channel)) {
*scan_current_only = true;
- dev_dbg(adapter->dev,
- "info: Scan: Scanning current channel only\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Scan: Scanning current channel only\n");
}
chan_num = chan_idx;
} else {
- dev_dbg(adapter->dev,
- "info: Scan: Creating full region channel list\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Scan: Creating full region channel list\n");
chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
scan_chan_list,
*filtered_scan);
@@ -1094,8 +1107,9 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
tlv_buf_left = tlv_buf_size;
*tlv_data = NULL;
- dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n",
- tlv_buf_size);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: tlv_buf_size = %d\n",
+ tlv_buf_size);
while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) {
@@ -1103,26 +1117,31 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
tlv_len = le16_to_cpu(current_tlv->header.len);
if (sizeof(tlv->header) + tlv_len > tlv_buf_left) {
- dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SCAN_RESP: TLV buffer corrupt\n");
break;
}
if (req_tlv_type == tlv_type) {
switch (tlv_type) {
case TLV_TYPE_TSFTIMESTAMP:
- dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
- "timestamp TLV, len = %d\n", tlv_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: TSF\t"
+ "timestamp TLV, len = %d\n",
+ tlv_len);
*tlv_data = current_tlv;
break;
case TLV_TYPE_CHANNELBANDLIST:
- dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
- " band list TLV, len = %d\n", tlv_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: channel\t"
+ "band list TLV, len = %d\n",
+ tlv_len);
*tlv_data = current_tlv;
break;
default:
- dev_err(adapter->dev,
- "SCAN_RESP: unhandled TLV = %d\n",
- tlv_type);
+ mwifiex_dbg(adapter, ERROR,
+ "SCAN_RESP: unhandled TLV = %d\n",
+ tlv_type);
/* Give up, this seems corrupted */
return;
}
@@ -1177,8 +1196,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
total_ie_len = element_len + sizeof(struct ieee_types_header);
if (bytes_left < total_ie_len) {
- dev_err(adapter->dev, "err: InterpretIE: in processing"
- " IE, bytes left < IE length\n");
+ mwifiex_dbg(adapter, ERROR,
+ "err: InterpretIE: in processing\t"
+ "IE, bytes left < IE length\n");
return -1;
}
switch (element_id) {
@@ -1186,9 +1206,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->ssid.ssid_len = element_len;
memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
element_len);
- dev_dbg(adapter->dev,
- "info: InterpretIE: ssid: %-32s\n",
- bss_entry->ssid.ssid);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: ssid: %-32s\n",
+ bss_entry->ssid.ssid);
break;
case WLAN_EID_SUPP_RATES:
@@ -1419,19 +1439,20 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
unsigned long flags;
if (adapter->scan_processing) {
- dev_err(adapter->dev, "cmd: Scan already in process...\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: Scan already in process...\n");
return -EBUSY;
}
if (priv->scan_block) {
- dev_err(adapter->dev,
- "cmd: Scan is blocked during association...\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: Scan is blocked during association...\n");
return -EBUSY;
}
if (adapter->surprise_removed || adapter->is_cmd_timedout) {
- dev_err(adapter->dev,
- "Ignore scan. Card removed or firmware in bad state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Ignore scan. Card removed or firmware in bad state\n");
return -EFAULT;
}
@@ -1478,7 +1499,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
/* Perform internal scan synchronously */
if (!priv->scan_request) {
- dev_dbg(adapter->dev, "wait internal scan\n");
+ mwifiex_dbg(adapter, INFO,
+ "wait internal scan\n");
mwifiex_wait_queue_complete(adapter, cmd_node);
}
} else {
@@ -1553,8 +1575,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
ret = mwifiex_is_network_compatible(priv, bss_desc,
priv->bss_mode);
if (ret)
- dev_err(priv->adapter->dev,
- "Incompatible network settings\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Incompatible network settings\n");
break;
default:
ret = 0;
@@ -1656,7 +1678,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
*/
if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
sizeof(struct mwifiex_fixed_bcn_param)) {
- dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+ mwifiex_dbg(adapter, ERROR,
+ "InterpretIE: not enough bytes left\n");
return -EFAULT;
}
@@ -1669,7 +1692,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
rssi = (-rssi) * 100; /* Convert dBm to mBm */
current_ptr += sizeof(u8);
curr_bcn_bytes -= sizeof(u8);
- dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: RSSI=%d\n", rssi);
} else {
rssi = rssi_val;
}
@@ -1682,14 +1706,16 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
beacon_period = le16_to_cpu(bcn_param->beacon_period);
cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
- dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
- cap_info_bitmap);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: capabilities=0x%X\n",
+ cap_info_bitmap);
/* Rest of the current buffer are IE's */
ie_buf = current_ptr;
ie_len = curr_bcn_bytes;
- dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
- curr_bcn_bytes);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: IELength for this AP = %d\n",
+ curr_bcn_bytes);
while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
u8 element_id, element_len;
@@ -1698,8 +1724,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
element_len = *(current_ptr + 1);
if (curr_bcn_bytes < element_len +
sizeof(struct ieee_types_header)) {
- dev_err(adapter->dev,
- "%s: bytes left < IE length\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: bytes left < IE length\n", __func__);
return -EFAULT;
}
if (element_id == WLAN_EID_DS_PARAMS) {
@@ -1719,8 +1745,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
/* Skip entry if on csa closed channel */
if (channel == priv->csa_chan) {
- dev_dbg(adapter->dev,
- "Dropping entry on csa closed channel\n");
+ mwifiex_dbg(adapter, WARN,
+ "Dropping entry on csa closed channel\n");
return 0;
}
@@ -1751,7 +1777,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
cfg80211_put_bss(priv->wdev.wiphy, bss);
}
} else {
- dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
}
return 0;
@@ -1765,7 +1791,8 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv)
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
if (!priv->scan_request) {
- dev_dbg(adapter->dev, "complete internal scan\n");
+ mwifiex_dbg(adapter, INFO,
+ "complete internal scan\n");
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
}
@@ -1788,12 +1815,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
mwifiex_complete_scan(priv);
if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: notifying scan done\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: notifying scan done\n");
cfg80211_scan_done(priv->scan_request, 0);
priv->scan_request = NULL;
} else {
priv->scan_aborting = false;
- dev_dbg(adapter->dev, "info: scan already aborted\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: scan already aborted\n");
}
} else if ((priv->scan_aborting && !priv->scan_request) ||
priv->scan_block) {
@@ -1809,12 +1838,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: aborting scan\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: aborting scan\n");
cfg80211_scan_done(priv->scan_request, 1);
priv->scan_request = NULL;
} else {
priv->scan_aborting = false;
- dev_dbg(adapter->dev, "info: scan already aborted\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: scan already aborted\n");
}
} else {
/* Get scan command from scan_pending_q and put to
@@ -1877,8 +1908,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) {
- dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
- scan_rsp->number_of_sets);
+ mwifiex_dbg(adapter, ERROR,
+ "SCAN_RESP: too many AP returned (%d)\n",
+ scan_rsp->number_of_sets);
ret = -1;
goto check_next_scan;
}
@@ -1887,14 +1919,15 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
mwifiex_11h_get_csa_closed_channel(priv);
bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
- dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
- bytes_left);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: bss_descript_size %d\n",
+ bytes_left);
scan_resp_size = le16_to_cpu(resp->size);
- dev_dbg(adapter->dev,
- "info: SCAN_RESP: returned %d APs before parsing\n",
- scan_rsp->number_of_sets);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: returned %d APs before parsing\n",
+ scan_rsp->number_of_sets);
bss_info = scan_rsp->bss_desc_and_tlv_buffer;
@@ -2007,13 +2040,13 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
le16_to_cpu(fw_chan_stats->cca_scan_dur);
chan_stats.cca_busy_dur =
le16_to_cpu(fw_chan_stats->cca_busy_dur);
- dev_dbg(adapter->dev,
- "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
- chan_stats.chan_num,
- chan_stats.noise,
- chan_stats.total_bss,
- chan_stats.cca_scan_dur,
- chan_stats.cca_busy_dur);
+ mwifiex_dbg(adapter, INFO,
+ "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
+ chan_stats.chan_num,
+ chan_stats.noise,
+ chan_stats.total_bss,
+ chan_stats.cca_scan_dur,
+ chan_stats.cca_busy_dur);
memcpy(&adapter->chan_stats[adapter->survey_idx++], &chan_stats,
sizeof(struct mwifiex_chan_stats));
fw_chan_stats++;
@@ -2035,7 +2068,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
unsigned long cmd_flags, scan_flags;
bool complete_scan = false;
- dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+ mwifiex_dbg(adapter, INFO, "info: EXT scan returns successfully\n");
ext_scan_resp = &resp->params.ext_scan;
@@ -2048,8 +2081,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
len = le16_to_cpu(tlv->len);
if (buf_left < (sizeof(struct mwifiex_ie_types_header) + len)) {
- dev_err(adapter->dev,
- "error processing scan response TLVs");
+ mwifiex_dbg(adapter, ERROR,
+ "error processing scan response TLVs");
break;
}
@@ -2075,8 +2108,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
cmd_ptr = (void *)cmd_node->cmd_skb->data;
if (le16_to_cpu(cmd_ptr->command) ==
HostCmd_CMD_802_11_SCAN_EXT) {
- dev_dbg(priv->adapter->dev,
- "Scan pending in command pending list");
+ mwifiex_dbg(adapter, INFO,
+ "Scan pending in command pending list");
complete_scan = false;
break;
}
@@ -2114,17 +2147,20 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
if (num_of_set > MWIFIEX_MAX_AP) {
- dev_err(adapter->dev,
- "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
- num_of_set);
+ mwifiex_dbg(adapter, ERROR,
+ "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
+ num_of_set);
ret = -1;
goto check_next_scan;
}
bytes_left = scan_resp_size;
- dev_dbg(adapter->dev,
- "EXT_SCAN: size %d, returned %d APs...",
- scan_resp_size, num_of_set);
+ mwifiex_dbg(adapter, INFO,
+ "EXT_SCAN: size %d, returned %d APs...",
+ scan_resp_size, num_of_set);
+ mwifiex_dbg_dump(adapter, CMD_D, "EXT_SCAN buffer:", buf,
+ scan_resp_size +
+ sizeof(struct mwifiex_event_scan_result));
tlv = (struct mwifiex_ie_types_data *)scan_resp;
@@ -2132,7 +2168,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
type = le16_to_cpu(tlv->header.type);
len = le16_to_cpu(tlv->header.len);
if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
- dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EXT_SCAN: Error bytes left < TLV length\n");
break;
}
scan_rsp_tlv = NULL;
@@ -2158,8 +2195,9 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
len = le16_to_cpu(tlv->header.len);
if (bytes_left_for_tlv <
sizeof(struct mwifiex_ie_types_header) + len) {
- dev_err(adapter->dev,
- "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EXT_SCAN: Error in processing TLV,\t"
+ "bytes left < TLV length\n");
scan_rsp_tlv = NULL;
bytes_left_for_tlv = 0;
continue;
@@ -2199,8 +2237,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
if (scan_info_tlv) {
rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
rssi *= 100; /* Convert dBm to mBm */
- dev_dbg(adapter->dev,
- "info: InterpretIE: RSSI=%d\n", rssi);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: RSSI=%d\n", rssi);
fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
radio_type = &scan_info_tlv->radio_type;
} else {
@@ -2271,13 +2309,14 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
struct mwifiex_user_scan_cfg *scan_cfg;
if (adapter->scan_processing) {
- dev_err(adapter->dev, "cmd: Scan already in process...\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: Scan already in process...\n");
return -EBUSY;
}
if (priv->scan_block) {
- dev_err(adapter->dev,
- "cmd: Scan is blocked during association...\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: Scan is blocked during association...\n");
return -EBUSY;
}
@@ -2309,8 +2348,9 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
int ret;
if (down_interruptible(&priv->async_sem)) {
- dev_err(priv->adapter->dev, "%s: acquire semaphore\n",
- __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: acquire semaphore fail\n",
+ __func__);
return -1;
}
@@ -2400,8 +2440,9 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf,
curr_bss->beacon_buf_size);
- dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n",
- priv->curr_bcn_size);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: current beacon saved %d\n",
+ priv->curr_bcn_size);
curr_bss->beacon_buf = priv->curr_bcn_buf;
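
Every hunk in scan.c above follows the same recipe: a dev_dbg()/dev_err()/dev_warn() call against adapter->dev becomes mwifiex_dbg(adapter, LEVEL, ...), where LEVEL names one of the driver's debug categories (MSG, ERROR, WARN, INFO, CMD, EVENT, DATA, INTR, IOCTL, DUMP, CMD_D appear in these hunks), and quoted strings split across source lines are re-joined with a trailing \t rather than a space, apparently to keep each fragment within the line-length limit. The mwifiex_dbg() definition itself is not part of this diff; a minimal sketch of such a mask-gated wrapper, with illustrative (assumed) MWIFIEX_DBG_* bit values and the adapter->debug_mask field as an assumption, looks like this:

/* Sketch only: the real definition is added elsewhere in this series;
 * the mask bit values below are illustrative assumptions.
 */
#define MWIFIEX_DBG_MSG		0x00000001
#define MWIFIEX_DBG_ERROR	0x00000004
#define MWIFIEX_DBG_CMD		0x00000010
#define MWIFIEX_DBG_EVENT	0x00000020
#define MWIFIEX_DBG_INTR	0x00000040
#define MWIFIEX_DBG_WARN	0x00100000
#define MWIFIEX_DBG_INFO	0x00200000

__printf(3, 4)
static void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
			 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	/* Drop the message unless this category is enabled on the adapter. */
	if (!adapter->dev || !(adapter->debug_mask & mask))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	dev_info(adapter->dev, "%pV", &vaf);
	va_end(args);
}

/* Token pasting turns mwifiex_dbg(adapter, ERROR, ...) into a
 * MWIFIEX_DBG_ERROR-masked call, keeping call sites compact.
 */
#define mwifiex_dbg(adapter, mask, fmt, ...)				\
	_mwifiex_dbg(adapter, MWIFIEX_DBG_##mask, fmt, ##__VA_ARGS__)

With a wrapper of this shape, a former dev_dbg(adapter->dev, ...) site needs only a category added, e.g. mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len), and each category can be toggled at runtime instead of compiling all messages in or out together.
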
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d10320f89bc1..a0b121f3460c 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -166,7 +166,8 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter = card->adapter;
if (!adapter->is_suspended) {
- dev_warn(adapter->dev, "device already resumed\n");
+ mwifiex_dbg(adapter, WARN,
+ "device already resumed\n");
return 0;
}
@@ -191,8 +192,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
struct mwifiex_adapter *adapter;
struct mwifiex_private *priv;
- pr_debug("info: SDIO func num=%d\n", func->num);
-
card = sdio_get_drvdata(func);
if (!card)
return;
@@ -201,6 +200,8 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (!adapter || !adapter->priv_num)
return;
+ mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
+
if (user_rmmod) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
@@ -257,12 +258,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
/* Enable the Host Sleep */
if (!mwifiex_enable_hs(adapter)) {
- dev_err(adapter->dev, "cmd: failed to suspend\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cmd: failed to suspend\n");
adapter->hs_enabling = false;
return -EFAULT;
}
- dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
+ mwifiex_dbg(adapter, INFO,
+ "cmd: suspend with MMC_PM_KEEP_POWER\n");
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
/* Indicate device suspended */
@@ -386,8 +389,8 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
if (adapter->is_suspended) {
- dev_err(adapter->dev,
- "%s: not allowed while suspended\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: not allowed while suspended\n", __func__);
return -1;
}
@@ -434,7 +437,8 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
*/
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
- dev_dbg(adapter->dev, "event: wakeup device...\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: wakeup device...\n");
return mwifiex_write_reg(adapter, CONFIGURATION_REG, HOST_POWER_UP);
}
@@ -446,7 +450,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
{
- dev_dbg(adapter->dev, "cmd: wakeup device completed\n");
+ mwifiex_dbg(adapter, EVENT,
+ "cmd: wakeup device completed\n");
return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
}
@@ -524,7 +529,8 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
else
return -1;
cont:
- pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
+ mwifiex_dbg(adapter, INFO,
+ "info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
/* Set Host interrupt reset to read to clear */
if (!mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, &reg))
@@ -556,10 +562,12 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
ret = mwifiex_write_data_sync(adapter, payload, pkt_len, port);
if (ret) {
i++;
- dev_err(adapter->dev, "host_to_card, write iomem"
- " (%d) failed: %d\n", i, ret);
+ mwifiex_dbg(adapter, ERROR,
+ "host_to_card, write iomem\t"
+ "(%d) failed: %d\n", i, ret);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
- dev_err(adapter->dev, "write CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "write CFG reg failed\n");
ret = -1;
if (i > MAX_WRITE_IOMEM_RETRY)
@@ -584,7 +592,8 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
const struct mwifiex_sdio_card_reg *reg = card->reg;
u32 rd_bitmap = card->mp_rd_bitmap;
- dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
if (card->supports_sdio_new_mode) {
if (!(rd_bitmap & reg->data_port_mask))
@@ -598,8 +607,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
(card->mp_rd_bitmap & CTRL_PORT_MASK)) {
card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK);
*port = CTRL_PORT;
- dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n",
- *port, card->mp_rd_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: port=%d mp_rd_bitmap=0x%08x\n",
+ *port, card->mp_rd_bitmap);
return 0;
}
@@ -613,9 +623,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
if (++card->curr_rd_port == card->max_ports)
card->curr_rd_port = reg->start_rd_port;
- dev_dbg(adapter->dev,
- "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
- *port, rd_bitmap, card->mp_rd_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
+ *port, rd_bitmap, card->mp_rd_bitmap);
return 0;
}
@@ -633,7 +643,8 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
const struct mwifiex_sdio_card_reg *reg = card->reg;
u32 wr_bitmap = card->mp_wr_bitmap;
- dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
if (!(wr_bitmap & card->mp_data_port_mask)) {
adapter->data_sent = true;
@@ -651,15 +662,16 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
}
if ((card->has_control_mask) && (*port == CTRL_PORT)) {
- dev_err(adapter->dev,
- "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
- *port, card->curr_wr_port, wr_bitmap,
- card->mp_wr_bitmap);
+ mwifiex_dbg(adapter, ERROR,
+ "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
+ *port, card->curr_wr_port, wr_bitmap,
+ card->mp_wr_bitmap);
return -1;
}
- dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
- *port, wr_bitmap, card->mp_wr_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
+ *port, wr_bitmap, card->mp_wr_bitmap);
return 0;
}
@@ -683,7 +695,8 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
usleep_range(10, 20);
}
- dev_err(adapter->dev, "poll card status failed, tries = %d\n", tries);
+ mwifiex_dbg(adapter, ERROR,
+ "poll card status failed, tries = %d\n", tries);
return -1;
}
@@ -738,7 +751,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
if (mwifiex_read_data_sync(adapter, card->mp_regs,
card->reg->max_mp_regs,
REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
- dev_err(adapter->dev, "read mp_regs failed\n");
+ mwifiex_dbg(adapter, ERROR, "read mp_regs failed\n");
return;
}
@@ -751,7 +764,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
* UP_LD_CMD_PORT_HOST_INT_STATUS
* Clear the interrupt status register
*/
- dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
+ mwifiex_dbg(adapter, INTR,
+ "int: sdio_ireg = %#x\n", sdio_ireg);
spin_lock_irqsave(&adapter->int_lock, flags);
adapter->int_status |= sdio_ireg;
spin_unlock_irqrestore(&adapter->int_lock, flags);
@@ -802,7 +816,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
/* Request the SDIO IRQ */
ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
if (ret) {
- dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "claim irq failed: ret=%d\n", ret);
goto out;
}
@@ -810,7 +825,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
ret = mwifiex_write_reg_locked(func, card->reg->host_int_mask_reg,
card->reg->host_int_enable);
if (ret) {
- dev_err(adapter->dev, "enable host interrupt failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "enable host interrupt failed\n");
sdio_release_irq(func);
}
@@ -830,22 +846,25 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
u32 nb;
if (!buffer) {
- dev_err(adapter->dev, "%s: buffer is NULL\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: buffer is NULL\n", __func__);
return -1;
}
ret = mwifiex_read_data_sync(adapter, buffer, npayload, ioport, 1);
if (ret) {
- dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__,
+ mwifiex_dbg(adapter, ERROR,
+ "%s: read iomem failed: %d\n", __func__,
ret);
return -1;
}
nb = le16_to_cpu(*(__le16 *) (buffer));
if (nb > npayload) {
- dev_err(adapter->dev, "%s: invalid packet, nb=%d npayload=%d\n",
- __func__, nb, npayload);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: invalid packet, nb=%d npayload=%d\n",
+ __func__, nb, npayload);
return -1;
}
@@ -877,13 +896,14 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
u32 i = 0;
if (!firmware_len) {
- dev_err(adapter->dev,
- "firmware image not found! Terminating download\n");
+ mwifiex_dbg(adapter, ERROR,
+ "firmware image not found! Terminating download\n");
return -1;
}
- dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n",
- firmware_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: downloading FW image (%d bytes)\n",
+ firmware_len);
/* Assume that the allocated buffer is 8-byte aligned */
fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL);
@@ -897,8 +917,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = mwifiex_sdio_poll_card_status(adapter, CARD_IO_READY |
DN_LD_CARD_RDY);
if (ret) {
- dev_err(adapter->dev, "FW download with helper:"
- " poll status timeout @ %d\n", offset);
+ mwifiex_dbg(adapter, ERROR,
+ "FW download with helper:\t"
+ "poll status timeout @ %d\n", offset);
goto done;
}
@@ -910,19 +931,19 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = mwifiex_read_reg(adapter, reg->base_0_reg,
&base0);
if (ret) {
- dev_err(adapter->dev,
- "dev BASE0 register read failed: "
- "base0=%#04X(%d). Terminating dnld\n",
- base0, base0);
+ mwifiex_dbg(adapter, ERROR,
+ "dev BASE0 register read failed:\t"
+ "base0=%#04X(%d). Terminating dnld\n",
+ base0, base0);
goto done;
}
ret = mwifiex_read_reg(adapter, reg->base_1_reg,
&base1);
if (ret) {
- dev_err(adapter->dev,
- "dev BASE1 register read failed: "
- "base1=%#04X(%d). Terminating dnld\n",
- base1, base1);
+ mwifiex_dbg(adapter, ERROR,
+ "dev BASE1 register read failed:\t"
+ "base1=%#04X(%d). Terminating dnld\n",
+ base1, base1);
goto done;
}
len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff));
@@ -936,9 +957,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (!len) {
break;
} else if (len > MWIFIEX_UPLD_SIZE) {
- dev_err(adapter->dev,
- "FW dnld failed @ %d, invalid length %d\n",
- offset, len);
+ mwifiex_dbg(adapter, ERROR,
+ "FW dnld failed @ %d, invalid length %d\n",
+ offset, len);
ret = -1;
goto done;
}
@@ -948,14 +969,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (len & BIT(0)) {
i++;
if (i > MAX_WRITE_IOMEM_RETRY) {
- dev_err(adapter->dev,
- "FW dnld failed @ %d, over max retry\n",
- offset);
+ mwifiex_dbg(adapter, ERROR,
+ "FW dnld failed @ %d, over max retry\n",
+ offset);
ret = -1;
goto done;
}
- dev_err(adapter->dev, "CRC indicated by the helper:"
- " len = 0x%04X, txlen = %d\n", len, txlen);
+ mwifiex_dbg(adapter, ERROR,
+ "CRC indicated by the helper:\t"
+ "len = 0x%04X, txlen = %d\n", len, txlen);
len &= ~BIT(0);
/* Setting this to 0 to resend from same offset */
txlen = 0;
@@ -978,11 +1000,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
MWIFIEX_SDIO_BLOCK_SIZE,
adapter->ioport);
if (ret) {
- dev_err(adapter->dev,
- "FW download, write iomem (%d) failed @ %d\n",
- i, offset);
+ mwifiex_dbg(adapter, ERROR,
+ "FW download, write iomem (%d) failed @ %d\n",
+ i, offset);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
- dev_err(adapter->dev, "write CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "write CFG reg failed\n");
ret = -1;
goto done;
@@ -991,8 +1014,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
offset += txlen;
} while (true);
- dev_notice(adapter->dev,
- "info: FW download over, size %d bytes\n", offset);
+ mwifiex_dbg(adapter, MSG,
+ "info: FW download over, size %d bytes\n", offset);
ret = 0;
done:
@@ -1066,18 +1089,20 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
blk_num = *(data + BLOCK_NUMBER_OFFSET);
blk_size = adapter->sdio_rx_block_size * blk_num;
if (blk_size > total_pkt_len) {
- dev_err(adapter->dev, "%s: error in pkt,\t"
- "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
- __func__, blk_num, blk_size, total_pkt_len);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: error in blk_size,\t"
+ "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
+ __func__, blk_num, blk_size, total_pkt_len);
break;
}
pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
2));
if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
- dev_err(adapter->dev, "%s: error in pkt,\t"
- "pkt_len=%d, blk_size=%d\n",
- __func__, pkt_len, blk_size);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: error in pkt_len,\t"
+ "pkt_len=%d, blk_size=%d\n",
+ __func__, pkt_len, blk_size);
break;
}
skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
@@ -1116,7 +1141,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
switch (upld_typ) {
case MWIFIEX_TYPE_AGGR_DATA:
- dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: --- Rx: Aggr Data packet ---\n");
rx_info = MWIFIEX_SKB_RXCB(skb);
rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA;
if (adapter->rx_work_enabled) {
@@ -1130,7 +1156,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
break;
case MWIFIEX_TYPE_DATA:
- dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
+ mwifiex_dbg(adapter, DATA,
+ "info: --- Rx: Data packet ---\n");
if (adapter->rx_work_enabled) {
skb_queue_tail(&adapter->rx_data_q, skb);
adapter->data_received = true;
@@ -1141,7 +1168,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
break;
case MWIFIEX_TYPE_CMD:
- dev_dbg(adapter->dev, "info: --- Rx: Cmd Response ---\n");
+ mwifiex_dbg(adapter, CMD,
+ "info: --- Rx: Cmd Response ---\n");
/* take care of curr_cmd = NULL case */
if (!adapter->curr_cmd) {
cmd_buf = adapter->upld_buf;
@@ -1163,7 +1191,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
break;
case MWIFIEX_TYPE_EVENT:
- dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
+ mwifiex_dbg(adapter, EVENT,
+ "info: --- Rx: Event ---\n");
adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
@@ -1178,7 +1207,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
break;
default:
- dev_err(adapter->dev, "unknown upload type %#x\n", upld_typ);
+ mwifiex_dbg(adapter, ERROR,
+ "unknown upload type %#x\n", upld_typ);
dev_kfree_skb_any(skb);
break;
}
@@ -1210,16 +1240,18 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if ((card->has_control_mask) && (port == CTRL_PORT)) {
/* Read the command Resp without aggr */
- dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
- "response\n", __func__);
+ mwifiex_dbg(adapter, CMD,
+ "info: %s: no aggregation for cmd\t"
+ "response\n", __func__);
f_do_rx_cur = 1;
goto rx_curr_single;
}
if (!card->mpa_rx.enabled) {
- dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n",
- __func__);
+ mwifiex_dbg(adapter, WARN,
+ "info: %s: rx aggregation disabled\n",
+ __func__);
f_do_rx_cur = 1;
goto rx_curr_single;
@@ -1230,7 +1262,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
(card->has_control_mask && (card->mp_rd_bitmap &
(~((u32) CTRL_PORT_MASK))))) {
/* Some more data RX pending */
- dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: not last packet\n", __func__);
if (MP_RX_AGGR_IN_PROGRESS(card)) {
if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) {
@@ -1247,7 +1280,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
} else {
/* No more data RX pending */
- dev_dbg(adapter->dev, "info: %s: last packet\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: last packet\n", __func__);
if (MP_RX_AGGR_IN_PROGRESS(card)) {
f_do_rx_aggr = 1;
@@ -1262,14 +1296,16 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
}
if (f_aggr_cur) {
- dev_dbg(adapter->dev, "info: current packet aggregation\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: current packet aggregation\n");
/* Curr pkt can be aggregated */
mp_rx_aggr_setup(card, rx_len, port);
if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
mp_rx_aggr_port_limit_reached(card)) {
- dev_dbg(adapter->dev, "info: %s: aggregated packet "
- "limit reached\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: aggregated packet\t"
+ "limit reached\n", __func__);
/* No more pkts allowed in Aggr buf, rx it */
f_do_rx_aggr = 1;
}
@@ -1277,8 +1313,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if (f_do_rx_aggr) {
/* do aggr RX now */
- dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
- card->mpa_rx.pkt_cnt);
+ mwifiex_dbg(adapter, DATA,
+ "info: do_rx_aggr: num of packets: %d\n",
+ card->mpa_rx.pkt_cnt);
if (card->supports_sdio_new_mode) {
int i;
@@ -1318,8 +1355,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
GFP_KERNEL |
GFP_DMA);
if (!skb_deaggr) {
- dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n",
- pkt_len, pkt_type);
+ mwifiex_dbg(adapter, ERROR, "skb allocation failure\t"
+ "drop pkt len=%d type=%d\n",
+ pkt_len, pkt_type);
curr_ptr += len_arr[pind];
continue;
}
@@ -1339,12 +1377,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
mwifiex_decode_rx_packet(adapter, skb_deaggr,
pkt_type);
} else {
- dev_err(adapter->dev, " drop wrong aggr pkt:\t"
- "sdio_single_port_rx_aggr=%d\t"
- "type=%d len=%d max_len=%d\n",
- adapter->sdio_rx_aggr_enable,
- pkt_type, pkt_len,
- len_arr[pind]);
+ mwifiex_dbg(adapter, ERROR,
+ "drop wrong aggr pkt:\t"
+ "sdio_single_port_rx_aggr=%d\t"
+ "type=%d len=%d max_len=%d\n",
+ adapter->sdio_rx_aggr_enable,
+ pkt_type, pkt_len, len_arr[pind]);
dev_kfree_skb_any(skb_deaggr);
}
curr_ptr += len_arr[pind];
@@ -1354,13 +1392,14 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
rx_curr_single:
if (f_do_rx_cur) {
- dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
- port, rx_len);
+ mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n",
+ port, rx_len);
skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
if (!skb) {
- dev_err(adapter->dev, "single skb allocated fail,\t"
- "drop pkt port=%d len=%d\n", port, rx_len);
+ mwifiex_dbg(adapter, ERROR,
+ "single skb allocated fail,\t"
+ "drop pkt port=%d len=%d\n", port, rx_len);
if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
card->mpa_rx.buf, rx_len,
adapter->ioport + port))
@@ -1376,9 +1415,9 @@ rx_curr_single:
goto error;
if (!adapter->sdio_rx_aggr_enable &&
pkt_type == MWIFIEX_TYPE_AGGR_DATA) {
- dev_err(adapter->dev, "drop wrong pkt type %d\t"
- "current SDIO RX Aggr not enabled\n",
- pkt_type);
+ mwifiex_dbg(adapter, ERROR, "drop wrong pkt type %d\t"
+ "current SDIO RX Aggr not enabled\n",
+ pkt_type);
dev_kfree_skb_any(skb);
return 0;
}
@@ -1386,7 +1425,8 @@ rx_curr_single:
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
if (f_post_aggr_cur) {
- dev_dbg(adapter->dev, "info: current packet aggregation\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: current packet aggregation\n");
/* Curr pkt can be aggregated */
mp_rx_aggr_setup(card, rx_len, port);
}
@@ -1458,7 +1498,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
MWIFIEX_RX_DATA_BUF_SIZE)
return -1;
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
- dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
+ mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len);
skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
if (!skb)
@@ -1469,17 +1509,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data,
skb->len, adapter->ioport |
CMD_PORT_SLCT)) {
- dev_err(adapter->dev,
- "%s: failed to card_to_host", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to card_to_host", __func__);
dev_kfree_skb_any(skb);
goto term_cmd;
}
if ((pkt_type != MWIFIEX_TYPE_CMD) &&
(pkt_type != MWIFIEX_TYPE_EVENT))
- dev_err(adapter->dev,
- "%s:Received wrong packet on cmd port",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s:Received wrong packet on cmd port",
+ __func__);
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
@@ -1495,12 +1535,13 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
}
card->mp_wr_bitmap = bitmap;
- dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n",
- card->mp_wr_bitmap);
+ mwifiex_dbg(adapter, INTR,
+ "int: DNLD: wr_bitmap=0x%x\n",
+ card->mp_wr_bitmap);
if (adapter->data_sent &&
(card->mp_wr_bitmap & card->mp_data_port_mask)) {
- dev_dbg(adapter->dev,
- "info: <--- Tx DONE Interrupt --->\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: <--- Tx DONE Interrupt --->\n");
adapter->data_sent = false;
}
}
@@ -1517,8 +1558,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
adapter->cmd_sent = false;
}
- dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
- adapter->cmd_sent, adapter->data_sent);
+ mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n",
+ adapter->cmd_sent, adapter->data_sent);
if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
bitmap = (u32) card->mp_regs[reg->rd_bitmap_l];
bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8;
@@ -1529,40 +1570,45 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24;
}
card->mp_rd_bitmap = bitmap;
- dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n",
- card->mp_rd_bitmap);
+ mwifiex_dbg(adapter, INTR,
+ "int: UPLD: rd_bitmap=0x%x\n",
+ card->mp_rd_bitmap);
while (true) {
ret = mwifiex_get_rd_port(adapter, &port);
if (ret) {
- dev_dbg(adapter->dev,
- "info: no more rd_port available\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: no more rd_port available\n");
break;
}
len_reg_l = reg->rd_len_p0_l + (port << 1);
len_reg_u = reg->rd_len_p0_u + (port << 1);
rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
rx_len |= (u16) card->mp_regs[len_reg_l];
- dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
- port, rx_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: RX: port=%d rx_len=%u\n",
+ port, rx_len);
rx_blocks =
(rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
1) / MWIFIEX_SDIO_BLOCK_SIZE;
if (rx_len <= INTF_HEADER_LEN ||
(rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
card->mpa_rx.buf_size) {
- dev_err(adapter->dev, "invalid rx_len=%d\n",
- rx_len);
+ mwifiex_dbg(adapter, ERROR,
+ "invalid rx_len=%d\n",
+ rx_len);
return -1;
}
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
- dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
+ mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n",
+ rx_len);
if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len,
port)) {
- dev_err(adapter->dev, "card_to_host_mpa failed:"
- " int status=%#x\n", sdio_ireg);
+ mwifiex_dbg(adapter, ERROR,
+ "card_to_host_mpa failed: int status=%#x\n",
+ sdio_ireg);
goto term_cmd;
}
}
@@ -1573,19 +1619,23 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
term_cmd:
/* terminate cmd */
if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
- dev_err(adapter->dev, "read CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR, "read CFG reg failed\n");
else
- dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr);
+ mwifiex_dbg(adapter, INFO,
+ "info: CFG reg val = %d\n", cr);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04)))
- dev_err(adapter->dev, "write CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "write CFG reg failed\n");
else
- dev_dbg(adapter->dev, "info: write success\n");
+ mwifiex_dbg(adapter, INFO, "info: write success\n");
if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
- dev_err(adapter->dev, "read CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "read CFG reg failed\n");
else
- dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr);
+ mwifiex_dbg(adapter, INFO,
+ "info: CFG reg val =%x\n", cr);
return -1;
}
@@ -1619,8 +1669,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
if (!card->mpa_tx.enabled ||
(card->has_control_mask && (port == CTRL_PORT)) ||
(card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) {
- dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
- __func__);
+ mwifiex_dbg(adapter, WARN,
+ "info: %s: tx aggregation disabled\n",
+ __func__);
f_send_cur_buf = 1;
goto tx_curr_single;
@@ -1628,8 +1679,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
if (next_pkt_len) {
/* More pkt in TX queue */
- dev_dbg(adapter->dev, "info: %s: more packets in queue.\n",
- __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: more packets in queue.\n",
+ __func__);
if (MP_TX_AGGR_IN_PROGRESS(card)) {
if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
@@ -1659,8 +1711,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
}
} else {
/* Last pkt in TX queue */
- dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n",
- __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: Last packet in Tx Queue.\n",
+ __func__);
if (MP_TX_AGGR_IN_PROGRESS(card)) {
/* some packs in Aggr buf already */
@@ -1677,8 +1730,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
}
if (f_precopy_cur_buf) {
- dev_dbg(adapter->dev, "data: %s: precopy current buffer\n",
- __func__);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: precopy current buffer\n",
+ __func__);
MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
@@ -1688,9 +1742,10 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
}
if (f_send_aggr_buf) {
- dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
- __func__,
- card->mpa_tx.start_port, card->mpa_tx.ports);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: send aggr buffer: %d %d\n",
+ __func__, card->mpa_tx.start_port,
+ card->mpa_tx.ports);
if (card->supports_sdio_new_mode) {
u32 port_count;
int i;
@@ -1719,15 +1774,17 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
tx_curr_single:
if (f_send_cur_buf) {
- dev_dbg(adapter->dev, "data: %s: send current buffer %d\n",
- __func__, port);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: send current buffer %d\n",
+ __func__, port);
ret = mwifiex_write_data_to_card(adapter, payload, pkt_len,
adapter->ioport + port);
}
if (f_postcopy_cur_buf) {
- dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n",
- __func__);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: postcopy current buffer\n",
+ __func__);
MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
}
@@ -1771,8 +1828,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
if (type == MWIFIEX_TYPE_DATA) {
ret = mwifiex_get_wr_port_data(adapter, &port);
if (ret) {
- dev_err(adapter->dev, "%s: no wr_port available\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: no wr_port available\n",
+ __func__);
return ret;
}
} else {
@@ -1781,8 +1839,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
if (pkt_len <= INTF_HEADER_LEN ||
pkt_len > MWIFIEX_UPLD_SIZE)
- dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
- __func__, payload, pkt_len);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: payload=%p, nb=%d\n",
+ __func__, payload, pkt_len);
if (card->supports_sdio_new_mode)
port = CMD_PORT_SLCT;
@@ -1896,7 +1955,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
sdio_release_host(func);
if (ret) {
- pr_err("cannot set SDIO block size\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot set SDIO block size\n");
return ret;
}
@@ -1977,7 +2037,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
card->mp_tx_agg_buf_size,
card->mp_rx_agg_buf_size);
if (ret) {
- dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
+ mwifiex_dbg(adapter, ERROR,
+ "failed to alloc sdio mp-a buffers\n");
kfree(card->mp_regs);
return -1;
}
@@ -2041,8 +2102,9 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
card->curr_wr_port = reg->start_wr_port;
- dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
- port, card->mp_data_port_mask);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: mp_end_port %d, data port mask 0x%x\n",
+ port, card->mp_data_port_mask);
}
static struct mwifiex_adapter *save_adapter;
@@ -2059,7 +2121,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
* We run it in a totally independent workqueue.
*/
- pr_err("Resetting card...\n");
+ mwifiex_dbg(adapter, WARN, "Resetting card...\n");
mmc_remove_host(target);
/* 200ms delay is based on experiment with sdhci controller */
mdelay(200);
@@ -2079,14 +2141,14 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
&ret);
if (ret) {
- dev_err(adapter->dev, "SDIO Write ERR\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
return RDWR_STATUS_FAILURE;
}
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl,
&ret);
if (ret) {
- dev_err(adapter->dev, "SDIO read err\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO read err\n");
return RDWR_STATUS_FAILURE;
}
if (ctrl_data == FW_DUMP_DONE)
@@ -2094,19 +2156,20 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
if (doneflag && ctrl_data == doneflag)
return RDWR_STATUS_DONE;
if (ctrl_data != FW_DUMP_HOST_READY) {
- dev_info(adapter->dev,
- "The ctrl reg was changed, re-try again!\n");
+ mwifiex_dbg(adapter, WARN,
+ "The ctrl reg was changed, re-try again!\n");
sdio_writeb(card->func, FW_DUMP_HOST_READY,
card->reg->fw_dump_ctrl, &ret);
if (ret) {
- dev_err(adapter->dev, "SDIO write err\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
return RDWR_STATUS_FAILURE;
}
}
usleep_range(100, 200);
}
if (ctrl_data == FW_DUMP_HOST_READY) {
- dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Fail to pull ctrl_data\n");
return RDWR_STATUS_FAILURE;
}
@@ -2114,7 +2177,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
}
/* This function dump firmware memory to file */
-static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
int ret = 0;
@@ -2122,9 +2185,6 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
enum rdwr_status stat;
u32 memory_size;
- static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL };
-
- mwifiex_dump_drv_info(adapter);
if (!card->can_dump_fw)
return;
@@ -2142,7 +2202,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
mwifiex_pm_wakeup_card(adapter);
sdio_claim_host(card->func);
- dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
if (stat == RDWR_STATUS_FAILURE)
@@ -2152,7 +2212,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
/* Read the number of the memories which will dump */
dump_num = sdio_readb(card->func, reg, &ret);
if (ret) {
- dev_err(adapter->dev, "SDIO read memory length err\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO read memory length err\n");
goto done;
}
@@ -2169,7 +2229,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
for (i = 0; i < 4; i++) {
read_reg = sdio_readb(card->func, reg, &ret);
if (ret) {
- dev_err(adapter->dev, "SDIO read err\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO read err\n");
goto done;
}
memory_size |= (read_reg << i*8);
@@ -2177,25 +2237,33 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
}
if (memory_size == 0) {
- dev_info(adapter->dev, "Firmware dump Finished!\n");
+ mwifiex_dbg(adapter, DUMP, "Firmware dump Finished!\n");
+ ret = mwifiex_write_reg(adapter,
+ card->reg->fw_dump_ctrl,
+ FW_DUMP_READ_DONE);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
+ return;
+ }
break;
}
- dev_info(adapter->dev,
- "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+ mwifiex_dbg(adapter, DUMP,
+ "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
entry->mem_ptr = vmalloc(memory_size + 1);
entry->mem_size = memory_size;
if (!entry->mem_ptr) {
- dev_err(adapter->dev, "Vmalloc %s failed\n",
- entry->mem_name);
+ mwifiex_dbg(adapter, ERROR, "Vmalloc %s failed\n",
+ entry->mem_name);
goto done;
}
dbg_ptr = entry->mem_ptr;
end_ptr = dbg_ptr + memory_size;
doneflag = entry->done_flag;
- dev_info(adapter->dev, "Start %s output, please wait...\n",
- entry->mem_name);
+ mwifiex_dbg(adapter, DUMP,
+ "Start %s output, please wait...\n",
+ entry->mem_name);
do {
stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
@@ -2207,39 +2275,43 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
for (reg = reg_start; reg <= reg_end; reg++) {
*dbg_ptr = sdio_readb(card->func, reg, &ret);
if (ret) {
- dev_err(adapter->dev,
- "SDIO read err\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SDIO read err\n");
goto done;
}
if (dbg_ptr < end_ptr)
dbg_ptr++;
else
- dev_err(adapter->dev,
- "Allocated buf not enough\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Allocated buf not enough\n");
}
if (stat != RDWR_STATUS_DONE)
continue;
- dev_info(adapter->dev, "%s done: size=0x%tx\n",
- entry->mem_name, dbg_ptr - entry->mem_ptr);
+ mwifiex_dbg(adapter, DUMP, "%s done: size=0x%tx\n",
+ entry->mem_name, dbg_ptr - entry->mem_ptr);
break;
} while (1);
}
- dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
-
- kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
done:
sdio_release_host(card->func);
- adapter->curr_mem_idx = 0;
+}
+
+static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
+{
+ mwifiex_drv_info_dump(adapter);
+ mwifiex_sdio_fw_dump(adapter);
+ mwifiex_upload_device_dump(adapter);
}
static void mwifiex_sdio_work(struct work_struct *work)
{
- if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+ if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
&iface_work_flags))
- mwifiex_sdio_fw_dump_work(save_adapter);
+ mwifiex_sdio_device_dump_work(save_adapter);
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
&iface_work_flags))
mwifiex_sdio_card_reset_work(save_adapter);
@@ -2259,13 +2331,13 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
}
/* This function dumps FW information */
-static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter)
{
save_adapter = adapter;
- if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
+ if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+ set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
schedule_work(&sdio_work);
}
@@ -2285,7 +2357,7 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
if (!p)
return 0;
- dev_info(adapter->dev, "SDIO register DUMP START\n");
+ mwifiex_dbg(adapter, MSG, "SDIO register dump start\n");
mwifiex_pm_wakeup_card(adapter);
@@ -2351,13 +2423,13 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
reg++;
}
- dev_info(adapter->dev, "%s\n", buf);
+ mwifiex_dbg(adapter, MSG, "%s\n", buf);
p += sprintf(p, "%s\n", buf);
}
sdio_release_host(cardp->func);
- dev_info(adapter->dev, "SDIO register DUMP END\n");
+ mwifiex_dbg(adapter, MSG, "SDIO register dump end\n");
return p - drv_buf;
}
@@ -2382,8 +2454,8 @@ static struct mwifiex_if_ops sdio_ops = {
.cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
.event_complete = mwifiex_sdio_event_complete,
.card_reset = mwifiex_sdio_card_reset,
- .fw_dump = mwifiex_sdio_fw_dump,
.reg_dump = mwifiex_sdio_reg_dump,
+ .device_dump = mwifiex_sdio_device_dump,
.deaggr_pkt = mwifiex_deaggr_sdio_pkt,
};
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 49422f2a5380..037adcd1f484 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -77,8 +77,8 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl;
if (cmd_action != HostCmd_ACT_GEN_SET) {
- dev_err(priv->adapter->dev,
- "mac_control: only support set cmd\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "mac_control: only support set cmd\n");
return -1;
}
@@ -112,7 +112,8 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
{
struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;
- dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib)
- 1 + S_DS_GEN);
@@ -129,11 +130,11 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
le16_add_cpu(&cmd->size, sizeof(u16));
}
- dev_dbg(priv->adapter->dev,
- "cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x,"
- " Value=0x%x\n",
- cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
- le16_to_cpu(*(__le16 *) snmp_mib->value));
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t"
+ "OIDSize=0x%x, Value=0x%x\n",
+ cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
+ le16_to_cpu(*(__le16 *)snmp_mib->value));
return 0;
}
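
The SNMP MIB hunk above also shows the endianness convention the converted log lines have to respect: firmware command fields are little-endian on the wire, held in __le16/__le32 host-side types, and converted only at the boundary (cpu_to_le16(), le16_to_cpu(), le16_add_cpu()). A tiny self-contained illustration of the pattern, with a hypothetical TLV struct and helper (names are not from this driver):

/* Hypothetical example of the __le16 boundary-conversion pattern
 * used throughout sta_cmd.c.
 */
struct example_tlv {
	__le16 type;
	__le16 len;
};

static void example_fill(struct example_tlv *tlv, u16 host_len)
{
	tlv->len = cpu_to_le16(host_len);	/* host order -> wire order */
	le16_add_cpu(&tlv->len, sizeof(u16));	/* in-place add on a __le16 */
	pr_debug("len=%u\n", le16_to_cpu(tlv->len));	/* wire -> host for logging */
}

This is why the converted debug lines wrap values such as snmp_mib->buf_size in le16_to_cpu() before printing: the raw field would print byte-swapped on big-endian hosts.
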
@@ -356,9 +357,9 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
(hscfg_param->conditions != cpu_to_le32(HS_CFG_CANCEL)) &&
((adapter->arp_filter_size > 0) &&
(adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
- dev_dbg(adapter->dev,
- "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
- adapter->arp_filter_size);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
+ adapter->arp_filter_size);
memcpy(((u8 *) hs_cfg) +
sizeof(struct host_cmd_ds_802_11_hs_cfg_enh),
adapter->arp_filter, adapter->arp_filter_size);
@@ -378,11 +379,11 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
hs_cfg->params.hs_config.gpio = hscfg_param->gpio;
hs_cfg->params.hs_config.gap = hscfg_param->gap;
- dev_dbg(adapter->dev,
- "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
- hs_cfg->params.hs_config.conditions,
- hs_cfg->params.hs_config.gpio,
- hs_cfg->params.hs_config.gap);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
+ hs_cfg->params.hs_config.conditions,
+ hs_cfg->params.hs_config.gpio,
+ hs_cfg->params.hs_config.gap);
}
return 0;
@@ -462,7 +463,7 @@ static int mwifiex_cmd_802_11_deauthenticate(struct mwifiex_private *priv,
/* Set AP MAC address */
memcpy(deauth->mac_addr, mac, ETH_ALEN);
- dev_dbg(priv->adapter->dev, "cmd: Deauth: %pM\n", deauth->mac_addr);
+ mwifiex_dbg(priv->adapter, CMD, "cmd: Deauth: %pM\n", deauth->mac_addr);
deauth->reason_code = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
@@ -540,9 +541,9 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
} else if (!priv->wep_key[i].key_length) {
continue;
} else {
- dev_err(priv->adapter->dev,
- "key%d Length = %d is incorrect\n",
- (i + 1), priv->wep_key[i].key_length);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "key%d Length = %d is incorrect\n",
+ (i + 1), priv->wep_key[i].key_length);
return -1;
}
}
@@ -562,7 +563,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
u16 size, len = KEY_PARAMS_FIXED_LEN;
if (enc_key->is_igtk_key) {
- dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "%s: Set CMAC AES Key\n", __func__);
if (enc_key->is_rx_seq_valid)
memcpy(km->key_param_set.key_params.cmac_aes.ipn,
enc_key->pn, enc_key->pn_len);
@@ -575,7 +577,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
enc_key->key_material, enc_key->key_len);
len += sizeof(struct mwifiex_cmac_aes_param);
} else {
- dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "%s: Set AES Key\n", __func__);
if (enc_key->is_rx_seq_valid)
memcpy(km->key_param_set.key_params.aes.pn,
enc_key->pn, enc_key->pn_len);
@@ -619,7 +622,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
km->action = cpu_to_le16(cmd_action);
if (cmd_action == HostCmd_ACT_GEN_GET) {
- dev_dbg(adapter->dev, "%s: Get key\n", __func__);
+ mwifiex_dbg(adapter, INFO, "%s: Get key\n", __func__);
km->key_param_set.key_idx =
enc_key->key_index & KEY_INDEX_MASK;
km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
@@ -646,7 +649,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_type_key_param_set_v2));
if (enc_key->key_disable) {
- dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
+ mwifiex_dbg(adapter, INFO, "%s: Remove key\n", __func__);
km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
@@ -667,7 +670,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
- dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
+ mwifiex_dbg(adapter, INFO, "%s: Set WEP Key\n", __func__);
len += sizeof(struct mwifiex_wep_param);
km->key_param_set.len = cpu_to_le16(len);
km->key_param_set.key_type = KEY_TYPE_ID_WEP;
@@ -710,7 +713,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
if (enc_key->is_wapi_key) {
- dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
+ mwifiex_dbg(adapter, INFO, "%s: Set WAPI Key\n", __func__);
km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
PN_LEN);
@@ -750,7 +753,8 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
- dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "%s: Set TKIP Key\n", __func__);
if (enc_key->is_rx_seq_valid)
memcpy(km->key_param_set.key_params.tkip.pn,
enc_key->pn, enc_key->pn_len);
@@ -814,7 +818,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
memset(&key_material->key_param_set, 0,
sizeof(struct mwifiex_ie_type_key_param_set));
if (enc_key->is_wapi_key) {
- dev_dbg(priv->adapter->dev, "info: Set WAPI Key\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: Set WAPI Key\n");
key_material->key_param_set.key_type_id =
cpu_to_le16(KEY_TYPE_ID_WAPI);
if (cmd_oid == KEY_INFO_ENABLED)
@@ -860,7 +864,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
}
if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
if (enc_key->is_igtk_key) {
- dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
+ mwifiex_dbg(priv->adapter, CMD, "cmd: CMAC_AES\n");
key_material->key_param_set.key_type_id =
cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
if (cmd_oid == KEY_INFO_ENABLED)
@@ -873,7 +877,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
key_material->key_param_set.key_info |=
cpu_to_le16(KEY_IGTK);
} else {
- dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
+ mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_AES\n");
key_material->key_param_set.key_type_id =
cpu_to_le16(KEY_TYPE_ID_AES);
if (cmd_oid == KEY_INFO_ENABLED)
@@ -892,7 +896,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
cpu_to_le16(KEY_MCAST);
}
} else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
- dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
+ mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_TKIP\n");
key_material->key_param_set.key_type_id =
cpu_to_le16(KEY_TYPE_ID_TKIP);
key_material->key_param_set.key_info =
@@ -999,7 +1003,8 @@ static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv,
&domain_info->domain;
u8 no_of_triplet = adapter->domain_reg.no_of_triplet;
- dev_dbg(adapter->dev, "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
+ mwifiex_dbg(adapter, INFO,
+ "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
cmd->command = cpu_to_le16(HostCmd_CMD_802_11D_DOMAIN_INFO);
domain_info->action = cpu_to_le16(cmd_action);
@@ -1071,6 +1076,26 @@ static int mwifiex_cmd_ibss_coalescing_status(struct host_cmd_ds_command *cmd,
return 0;
}
+/* This function prepares command buffer to get/set memory location value.
+ */
+static int
+mwifiex_cmd_mem_access(struct host_cmd_ds_command *cmd, u16 cmd_action,
+ void *pdata_buf)
+{
+ struct mwifiex_ds_mem_rw *mem_rw = (void *)pdata_buf;
+ struct host_cmd_ds_mem_access *mem_access = (void *)&cmd->params.mem;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS);
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mem_access) +
+ S_DS_GEN);
+
+ mem_access->action = cpu_to_le16(cmd_action);
+ mem_access->addr = cpu_to_le32(mem_rw->addr);
+ mem_access->value = cpu_to_le32(mem_rw->value);
+
+ return 0;
+}
+
/*
* This function prepares command to set/get register value.
*
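
The new prep routine only marshals the request; callers reach it through the normal command dispatch. A hypothetical helper, sketched against the mwifiex_send_cmd() signature used throughout this patch (the helper itself is illustrative, not part of the series):

/* Illustrative only: write one word of firmware memory via the new
 * HostCmd_CMD_MEM_ACCESS path. mwifiex_send_cmd() routes the request
 * to mwifiex_cmd_mem_access() above; struct mwifiex_ds_mem_rw comes
 * from this series. */
static int mwifiex_write_fw_word(struct mwifiex_private *priv,
				 u32 addr, u32 value)
{
	struct mwifiex_ds_mem_rw mem_rw = { .addr = addr, .value = value };

	return mwifiex_send_cmd(priv, HostCmd_CMD_MEM_ACCESS,
				HostCmd_ACT_GEN_SET, 0, &mem_rw, true);
}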
@@ -1215,8 +1240,9 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
(u32)(card->sleep_cookie_pbase);
host_spec->sleep_cookie_addr_hi =
(u32)(((u64)(card->sleep_cookie_pbase)) >> 32);
- dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: 0x%x\n",
- host_spec->sleep_cookie_addr_lo);
+ mwifiex_dbg(priv->adapter, INFO,
+ "sleep_cook_lo phy addr: 0x%x\n",
+ host_spec->sleep_cookie_addr_lo);
}
return 0;
@@ -1243,7 +1269,8 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
S_DS_GEN);
subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action);
- dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: action: %d\n", subsc_evt_cfg->action);
/*For query requests, no configuration TLV structures are to be added.*/
if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET)
@@ -1252,14 +1279,15 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events);
event_bitmap = subsc_evt_cfg->events;
- dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n",
- event_bitmap);
+ mwifiex_dbg(priv->adapter, CMD, "cmd: event bitmap : %16x\n",
+ event_bitmap);
if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) ||
(subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) &&
(event_bitmap == 0)) {
- dev_dbg(priv->adapter->dev, "Error: No event specified "
- "for bitwise action type\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Error: No event specified\t"
+ "for bitwise action type\n");
return -EINVAL;
}
@@ -1284,10 +1312,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value;
rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq;
- dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, "
- "RSSI:-%d dBm, Freq:%d\n",
- subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
- subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
+ mwifiex_dbg(priv->adapter, EVENT,
+ "Cfg Beacon Low Rssi event,\t"
+ "RSSI:-%d dBm, Freq:%d\n",
+ subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
+ subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
le16_add_cpu(&cmd->size,
@@ -1304,10 +1333,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value;
rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq;
- dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, "
- "RSSI:-%d dBm, Freq:%d\n",
- subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
- subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
+ mwifiex_dbg(priv->adapter, EVENT,
+ "Cfg Beacon High Rssi event,\t"
+ "RSSI:-%d dBm, Freq:%d\n",
+ subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
+ subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
le16_add_cpu(&cmd->size,
@@ -1463,12 +1493,14 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
data, len);
if (ret)
return ret;
- dev_dbg(adapter->dev,
- "download cfg_data from device tree: %s\n", prop->name);
+ mwifiex_dbg(adapter, INFO,
+ "download cfg_data from device tree: %s\n",
+ prop->name);
} else if (adapter->cal_data->data && adapter->cal_data->size > 0) {
len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
adapter->cal_data->size, data);
- dev_dbg(adapter->dev, "download cfg_data from config file\n");
+ mwifiex_dbg(adapter, INFO,
+ "download cfg_data from config file\n");
} else {
return -1;
}
@@ -1583,9 +1615,9 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
if (!params) {
- dev_err(priv->adapter->dev,
- "TDLS config params not available for %pM\n",
- oper->peer_mac);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS config params not available for %pM\n",
+ oper->peer_mac);
return -ENODATA;
}
@@ -1663,7 +1695,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
break;
default:
- dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS operation\n");
return -ENOTSUPP;
}
@@ -1870,8 +1902,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_11n_cfg(priv, cmd_ptr, cmd_action, data_buf);
break;
case HostCmd_CMD_WMM_GET_STATUS:
- dev_dbg(priv->adapter->dev,
- "cmd: WMM: WMM_GET_STATUS cmd sent\n");
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: WMM: WMM_GET_STATUS cmd sent\n");
cmd_ptr->command = cpu_to_le16(HostCmd_CMD_WMM_GET_STATUS);
cmd_ptr->size =
cpu_to_le16(sizeof(struct host_cmd_ds_wmm_get_status) +
@@ -1885,6 +1917,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_802_11_SCAN_EXT:
ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
break;
+ case HostCmd_CMD_MEM_ACCESS:
+ ret = mwifiex_cmd_mem_access(cmd_ptr, cmd_action, data_buf);
+ break;
case HostCmd_CMD_MAC_REG_ACCESS:
case HostCmd_CMD_BBP_REG_ACCESS:
case HostCmd_CMD_RF_REG_ACCESS:
@@ -1932,8 +1967,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
data_buf);
break;
default:
- dev_err(priv->adapter->dev,
- "PREP_CMD: unknown cmd- %#x\n", cmd_no);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "PREP_CMD: unknown cmd- %#x\n", cmd_no);
ret = -1;
break;
}
@@ -2024,8 +2059,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
&sdio_sp_rx_aggr_enable,
true);
if (ret) {
- dev_err(priv->adapter->dev,
- "error while enabling SP aggregation..disable it");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "error while enabling SP aggregation..disable it");
adapter->sdio_rx_aggr_enable = false;
}
}
@@ -2108,8 +2143,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
HostCmd_ACT_GEN_SET, DOT11D_I,
&state_11d, true);
if (ret)
- dev_err(priv->adapter->dev,
- "11D: failed to enable 11D\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "11D: failed to enable 11D\n");
}
/* Send cmd to FW to configure 11n specific configuration
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 88dc6b672ef4..aa5b9a310340 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -49,8 +49,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
struct host_cmd_ds_802_11_ps_mode_enh *pm;
unsigned long flags;
- dev_err(adapter->dev, "CMD_RESP: cmd %#x error, result=%#x\n",
- resp->command, resp->result);
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: cmd %#x error, result=%#x\n",
+ resp->command, resp->result);
if (adapter->curr_cmd->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
@@ -58,9 +59,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
switch (le16_to_cpu(resp->command)) {
case HostCmd_CMD_802_11_PS_MODE_ENH:
pm = &resp->params.psmode_enh;
- dev_err(adapter->dev,
- "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n",
- resp->result, le16_to_cpu(pm->action));
+ mwifiex_dbg(adapter, ERROR,
+ "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n",
+ resp->result, le16_to_cpu(pm->action));
/* We do not re-try enter-ps command in ad-hoc mode. */
if (le16_to_cpu(pm->action) == EN_AUTO_PS &&
(le16_to_cpu(pm->params.ps_bitmap) & BITMAP_STA_PS) &&
@@ -91,7 +92,8 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
break;
case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
- dev_err(priv->adapter->dev, "SDIO RX single-port aggregation Not support\n");
+ mwifiex_dbg(adapter, MSG,
+ "SDIO RX single-port aggregation Not support\n");
break;
default:
@@ -187,29 +189,34 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
u16 query_type = le16_to_cpu(smib->query_type);
u32 ul_temp;
- dev_dbg(priv->adapter->dev, "info: SNMP_RESP: oid value = %#x,"
- " query_type = %#x, buf size = %#x\n",
- oid, query_type, le16_to_cpu(smib->buf_size));
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: oid value = %#x,\t"
+ "query_type = %#x, buf size = %#x\n",
+ oid, query_type, le16_to_cpu(smib->buf_size));
if (query_type == HostCmd_ACT_GEN_GET) {
ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
if (data_buf)
*data_buf = ul_temp;
switch (oid) {
case FRAG_THRESH_I:
- dev_dbg(priv->adapter->dev,
- "info: SNMP_RESP: FragThsd =%u\n", ul_temp);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: FragThsd =%u\n",
+ ul_temp);
break;
case RTS_THRESH_I:
- dev_dbg(priv->adapter->dev,
- "info: SNMP_RESP: RTSThsd =%u\n", ul_temp);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: RTSThsd =%u\n",
+ ul_temp);
break;
case SHORT_RETRY_LIM_I:
- dev_dbg(priv->adapter->dev,
- "info: SNMP_RESP: TxRetryCount=%u\n", ul_temp);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: TxRetryCount=%u\n",
+ ul_temp);
break;
case DTIM_PERIOD_I:
- dev_dbg(priv->adapter->dev,
- "info: SNMP_RESP: DTIM period=%u\n", ul_temp);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: DTIM period=%u\n",
+ ul_temp);
default:
break;
}
@@ -426,14 +433,15 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
priv->tx_power_level = (u16) pg->power_min;
break;
default:
- dev_err(adapter->dev, "CMD_RESP: unknown cmd action %d\n",
- action);
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: unknown cmd action %d\n",
+ action);
return 0;
}
- dev_dbg(adapter->dev,
- "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
- priv->tx_power_level, priv->max_tx_power_level,
- priv->min_tx_power_level);
+ mwifiex_dbg(adapter, INFO,
+ "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
+ priv->tx_power_level, priv->max_tx_power_level,
+ priv->min_tx_power_level);
return 0;
}
@@ -454,10 +462,10 @@ static int mwifiex_ret_rf_tx_power(struct mwifiex_private *priv,
priv->min_tx_power_level = txp->min_power;
}
- dev_dbg(priv->adapter->dev,
- "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
- priv->tx_power_level, priv->max_tx_power_level,
- priv->min_tx_power_level);
+ mwifiex_dbg(priv->adapter, INFO,
+ "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
+ priv->tx_power_level, priv->max_tx_power_level,
+ priv->min_tx_power_level);
return 0;
}
@@ -473,18 +481,18 @@ static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
- dev_dbg(adapter->dev,
- "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x"
- " Rx action = 0x%x, Rx Mode = 0x%04x\n",
- le16_to_cpu(ant_mimo->action_tx),
- le16_to_cpu(ant_mimo->tx_ant_mode),
- le16_to_cpu(ant_mimo->action_rx),
- le16_to_cpu(ant_mimo->rx_ant_mode));
+ mwifiex_dbg(adapter, INFO,
+ "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x\t"
+ "Rx action = 0x%x, Rx Mode = 0x%04x\n",
+ le16_to_cpu(ant_mimo->action_tx),
+ le16_to_cpu(ant_mimo->tx_ant_mode),
+ le16_to_cpu(ant_mimo->action_rx),
+ le16_to_cpu(ant_mimo->rx_ant_mode));
else
- dev_dbg(adapter->dev,
- "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
- le16_to_cpu(ant_siso->action),
- le16_to_cpu(ant_siso->ant_mode));
+ mwifiex_dbg(adapter, INFO,
+ "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
+ le16_to_cpu(ant_siso->action),
+ le16_to_cpu(ant_siso->ant_mode));
return 0;
}
@@ -502,8 +510,8 @@ static int mwifiex_ret_802_11_mac_address(struct mwifiex_private *priv,
memcpy(priv->curr_addr, cmd_mac_addr->mac_addr, ETH_ALEN);
- dev_dbg(priv->adapter->dev,
- "info: set mac address: %pM\n", priv->curr_addr);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: set mac address: %pM\n", priv->curr_addr);
return 0;
}
@@ -587,7 +595,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
- dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: key: GTK is set\n");
priv->wpa_is_gtk_set = true;
priv->scan_block = false;
}
@@ -617,7 +626,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
key_v2 = &resp->params.key_material_v2;
if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
- dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
priv->wpa_is_gtk_set = true;
priv->scan_block = false;
}
@@ -663,14 +672,14 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
- IEEE80211_COUNTRY_STRING_LEN)
/ sizeof(struct ieee80211_country_ie_triplet));
- dev_dbg(priv->adapter->dev,
- "info: 11D Domain Info Resp: no_of_triplet=%d\n",
- no_of_triplet);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: 11D Domain Info Resp: no_of_triplet=%d\n",
+ no_of_triplet);
if (no_of_triplet > MWIFIEX_MAX_TRIPLET_802_11D) {
- dev_warn(priv->adapter->dev,
- "11D: invalid number of triplets %d returned\n",
- no_of_triplet);
+ mwifiex_dbg(priv->adapter, FATAL,
+ "11D: invalid number of triplets %d returned\n",
+ no_of_triplet);
return -1;
}
@@ -680,8 +689,8 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
case HostCmd_ACT_GEN_GET:
break;
default:
- dev_err(priv->adapter->dev,
- "11D: invalid action:%d\n", domain_info->action);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "11D: invalid action:%d\n", domain_info->action);
return -1;
}
@@ -741,6 +750,19 @@ mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
return 0;
}
+/* This function handles the command response of mem_access command
+ */
+static int
+mwifiex_ret_mem_access(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp, void *pioctl_buf)
+{
+ struct host_cmd_ds_mem_access *mem = (void *)&resp->params.mem;
+
+ priv->mem_rw.addr = le32_to_cpu(mem->addr);
+ priv->mem_rw.value = le32_to_cpu(mem->value);
+
+ return 0;
+}
/*
* This function handles the command response of register access.
*
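
The GET direction completes in the handler just added: by the time a synchronous mwifiex_send_cmd() returns, the little-endian reply has been byte-swapped into priv->mem_rw. A read-back sketch under the same assumptions as the write helper earlier (illustrative, not part of the series):

/* Illustrative only: read one word of firmware memory. The response
 * handler mwifiex_ret_mem_access() above caches the result in
 * priv->mem_rw before the synchronous send returns. */
static int mwifiex_read_fw_word(struct mwifiex_private *priv,
				u32 addr, u32 *value)
{
	struct mwifiex_ds_mem_rw mem_rw = { .addr = addr, .value = 0 };
	int ret;

	ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEM_ACCESS,
			       HostCmd_ACT_GEN_GET, 0, &mem_rw, true);
	if (ret)
		return ret;

	*value = priv->mem_rw.value;
	return 0;
}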
@@ -830,12 +852,12 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
return 0;
- dev_dbg(priv->adapter->dev,
- "info: new BSSID %pM\n", ibss_coal_resp->bssid);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: new BSSID %pM\n", ibss_coal_resp->bssid);
/* If rsp has NULL BSSID, Just return..... No Action */
if (is_zero_ether_addr(ibss_coal_resp->bssid)) {
- dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
+ mwifiex_dbg(priv->adapter, FATAL, "new BSSID is NULL\n");
return 0;
}
@@ -871,48 +893,48 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
case ACT_TDLS_DELETE:
if (reason) {
if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
- dev_dbg(priv->adapter->dev,
- "TDLS link delete for %pM failed: reason %d\n",
- cmd_tdls_oper->peer_mac, reason);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
else
- dev_err(priv->adapter->dev,
- "TDLS link delete for %pM failed: reason %d\n",
- cmd_tdls_oper->peer_mac, reason);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
} else {
- dev_dbg(priv->adapter->dev,
- "TDLS link delete for %pM successful\n",
- cmd_tdls_oper->peer_mac);
+ mwifiex_dbg(priv->adapter, MSG,
+ "TDLS link delete for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
}
break;
case ACT_TDLS_CREATE:
if (reason) {
- dev_err(priv->adapter->dev,
- "TDLS link creation for %pM failed: reason %d",
- cmd_tdls_oper->peer_mac, reason);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS link creation for %pM failed: reason %d",
+ cmd_tdls_oper->peer_mac, reason);
if (node && reason != TDLS_ERR_LINK_EXISTS)
node->tdls_status = TDLS_SETUP_FAILURE;
} else {
- dev_dbg(priv->adapter->dev,
- "TDLS link creation for %pM successful",
- cmd_tdls_oper->peer_mac);
+ mwifiex_dbg(priv->adapter, MSG,
+ "TDLS link creation for %pM successful",
+ cmd_tdls_oper->peer_mac);
}
break;
case ACT_TDLS_CONFIG:
if (reason) {
- dev_err(priv->adapter->dev,
- "TDLS link config for %pM failed, reason %d\n",
- cmd_tdls_oper->peer_mac, reason);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS link config for %pM failed, reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
if (node)
node->tdls_status = TDLS_SETUP_FAILURE;
} else {
- dev_dbg(priv->adapter->dev,
- "TDLS link config for %pM successful\n",
- cmd_tdls_oper->peer_mac);
+ mwifiex_dbg(priv->adapter, MSG,
+ "TDLS link config for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
}
break;
default:
- dev_err(priv->adapter->dev,
- "Unknown TDLS command action response %d", action);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Unknown TDLS command action response %d", action);
return -1;
}
@@ -929,8 +951,9 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
/* For every subscribe event command (Get/Set/Clear), FW reports the
* current set of subscribed events*/
- dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
- le16_to_cpu(cmd_sub_event->events));
+ mwifiex_dbg(priv->adapter, EVENT,
+ "Bitmap of currently subscribed events: %16x\n",
+ le16_to_cpu(cmd_sub_event->events));
return 0;
}
@@ -940,7 +963,7 @@ static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
if (resp->result != HostCmd_RESULT_OK) {
- dev_err(priv->adapter->dev, "Cal data cmd resp failed\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Cal data cmd resp failed\n");
return -1;
}
@@ -1008,8 +1031,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
ret = mwifiex_ret_802_11_scan(priv, resp);
- dev_dbg(adapter->dev,
- "info: CMD_RESP: BG_SCAN result is ready!\n");
+ mwifiex_dbg(adapter, CMD,
+ "info: CMD_RESP: BG_SCAN result is ready!\n");
break;
case HostCmd_CMD_TXPWR_CFG:
ret = mwifiex_ret_tx_power_cfg(priv, resp);
@@ -1088,8 +1111,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
/ MWIFIEX_SDIO_BLOCK_SIZE)
* MWIFIEX_SDIO_BLOCK_SIZE;
adapter->curr_tx_buf_size = adapter->tx_buf_size;
- dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n",
- adapter->curr_tx_buf_size);
+ mwifiex_dbg(adapter, CMD, "cmd: curr_tx_buf_size=%d\n",
+ adapter->curr_tx_buf_size);
if (adapter->if_ops.update_mp_end_port)
adapter->if_ops.update_mp_end_port(adapter,
@@ -1103,6 +1126,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
ret = mwifiex_ret_ibss_coalescing_status(priv, resp);
break;
+ case HostCmd_CMD_MEM_ACCESS:
+ ret = mwifiex_ret_mem_access(priv, resp, data_buf);
+ break;
case HostCmd_CMD_MAC_REG_ACCESS:
case HostCmd_CMD_BBP_REG_ACCESS:
case HostCmd_CMD_RF_REG_ACCESS:
@@ -1146,8 +1172,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
break;
default:
- dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
- resp->command);
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: unknown cmd response %#x\n",
+ resp->command);
break;
}
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 0dc7a1d3993d..95203780010a 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -48,7 +48,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
if (!priv->media_connected)
return;
- dev_dbg(adapter->dev, "info: handles disconnect event\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: handles disconnect event\n");
priv->media_connected = false;
@@ -104,12 +105,14 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
* it could be used for re-assoc
*/
- dev_dbg(adapter->dev, "info: previous SSID=%s, SSID len=%u\n",
- priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: previous SSID=%s, SSID len=%u\n",
+ priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
- dev_dbg(adapter->dev, "info: current SSID=%s, SSID len=%u\n",
- priv->curr_bss_params.bss_descriptor.ssid.ssid,
- priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: current SSID=%s, SSID len=%u\n",
+ priv->curr_bss_params.bss_descriptor.ssid.ssid,
+ priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
memcpy(&priv->prev_ssid,
&priv->curr_bss_params.bss_descriptor.ssid,
@@ -127,13 +130,13 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
if (adapter->is_cmd_timedout && adapter->curr_cmd)
return;
priv->media_connected = false;
- dev_dbg(adapter->dev,
- "info: successfully disconnected from %pM: reason code %d\n",
- priv->cfg_bssid, reason_code);
+ mwifiex_dbg(adapter, MSG,
+ "info: successfully disconnected from %pM: reason code %d\n",
+ priv->cfg_bssid, reason_code);
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
- GFP_KERNEL);
+ false, GFP_KERNEL);
}
eth_zero_addr(priv->cfg_bssid);
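
Note the extra false argument to cfg80211_disconnected() in the hunk above: it tracks the cfg80211 API gaining a locally_generated flag in this kernel series. For reference, the declaration as in net/cfg80211.h; firmware-reported deauth/disassoc events are not locally generated, hence false here:

void cfg80211_disconnected(struct net_device *dev, u16 reason,
			   const u8 *ie, size_t ie_len,
			   bool locally_generated, gfp_t gfp);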
@@ -154,13 +157,13 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
/* reserved 2 bytes are not mandatory in tdls event */
if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
sizeof(u16) - sizeof(adapter->event_cause))) {
- dev_err(adapter->dev, "Invalid event length!\n");
+ mwifiex_dbg(adapter, ERROR, "Invalid event length!\n");
return -1;
}
sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac);
if (!sta_ptr) {
- dev_err(adapter->dev, "cannot get sta entry!\n");
+ mwifiex_dbg(adapter, ERROR, "cannot get sta entry!\n");
return -1;
}
@@ -239,21 +242,21 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
- dev_err(adapter->dev,
- "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n");
+ mwifiex_dbg(adapter, ERROR,
+ "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n");
break;
case EVENT_LINK_SENSED:
- dev_dbg(adapter->dev, "event: LINK_SENSED\n");
+ mwifiex_dbg(adapter, EVENT, "event: LINK_SENSED\n");
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
break;
case EVENT_DEAUTHENTICATED:
- dev_dbg(adapter->dev, "event: Deauthenticated\n");
+ mwifiex_dbg(adapter, EVENT, "event: Deauthenticated\n");
if (priv->wps.session_enable) {
- dev_dbg(adapter->dev,
- "info: receive deauth event in wps session\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: receive deauth event in wps session\n");
break;
}
adapter->dbg.num_event_deauth++;
@@ -265,10 +268,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_DISASSOCIATED:
- dev_dbg(adapter->dev, "event: Disassociated\n");
+ mwifiex_dbg(adapter, EVENT, "event: Disassociated\n");
if (priv->wps.session_enable) {
- dev_dbg(adapter->dev,
- "info: receive disassoc event in wps session\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: receive disassoc event in wps session\n");
break;
}
adapter->dbg.num_event_disassoc++;
@@ -280,7 +283,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_LINK_LOST:
- dev_dbg(adapter->dev, "event: Link lost\n");
+ mwifiex_dbg(adapter, EVENT, "event: Link lost\n");
adapter->dbg.num_event_link_lost++;
if (priv->media_connected) {
reason_code =
@@ -290,7 +293,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_PS_SLEEP:
- dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
+ mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n");
adapter->ps_state = PS_STATE_PRE_SLEEP;
@@ -298,12 +301,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_PS_AWAKE:
- dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
+ mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
if (!adapter->pps_uapsd_mode &&
priv->media_connected && adapter->sleep_period.period) {
adapter->pps_uapsd_mode = true;
- dev_dbg(adapter->dev,
- "event: PPS/UAPSD mode activated\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: PPS/UAPSD mode activated\n");
}
adapter->tx_lock_flag = false;
if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -333,26 +336,26 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_DEEP_SLEEP_AWAKE:
adapter->if_ops.wakeup_complete(adapter);
- dev_dbg(adapter->dev, "event: DS_AWAKE\n");
+ mwifiex_dbg(adapter, EVENT, "event: DS_AWAKE\n");
if (adapter->is_deep_sleep)
adapter->is_deep_sleep = false;
break;
case EVENT_HS_ACT_REQ:
- dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
+ mwifiex_dbg(adapter, EVENT, "event: HS_ACT_REQ\n");
ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH,
0, 0, NULL, false);
break;
case EVENT_MIC_ERR_UNICAST:
- dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
+ mwifiex_dbg(adapter, EVENT, "event: UNICAST MIC ERROR\n");
cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
NL80211_KEYTYPE_PAIRWISE,
-1, NULL, GFP_KERNEL);
break;
case EVENT_MIC_ERR_MULTICAST:
- dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
+ mwifiex_dbg(adapter, EVENT, "event: MULTICAST MIC ERROR\n");
cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
NL80211_KEYTYPE_GROUP,
-1, NULL, GFP_KERNEL);
@@ -362,7 +365,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_ADHOC_BCN_LOST:
- dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n");
+ mwifiex_dbg(adapter, EVENT, "event: ADHOC_BCN_LOST\n");
priv->adhoc_is_link_sensed = false;
mwifiex_clean_txrx(priv);
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -371,17 +374,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_BG_SCAN_REPORT:
- dev_dbg(adapter->dev, "event: BGS_REPORT\n");
+ mwifiex_dbg(adapter, EVENT, "event: BGS_REPORT\n");
ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY,
HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
case EVENT_PORT_RELEASE:
- dev_dbg(adapter->dev, "event: PORT RELEASE\n");
+ mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
break;
case EVENT_EXT_SCAN_REPORT:
- dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
if (adapter->ext_scan)
ret = mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
@@ -389,7 +392,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_WMM_STATUS_CHANGE:
- dev_dbg(adapter->dev, "event: WMM status changed\n");
+ mwifiex_dbg(adapter, EVENT, "event: WMM status changed\n");
ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS,
0, 0, NULL, false);
break;
@@ -401,13 +404,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
- dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
+ mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_LOW\n");
break;
case EVENT_SNR_LOW:
- dev_dbg(adapter->dev, "event: Beacon SNR_LOW\n");
+ mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_LOW\n");
break;
case EVENT_MAX_FAIL:
- dev_dbg(adapter->dev, "event: MAX_FAIL\n");
+ mwifiex_dbg(adapter, EVENT, "event: MAX_FAIL\n");
break;
case EVENT_RSSI_HIGH:
cfg80211_cqm_rssi_notify(priv->netdev,
@@ -416,47 +419,47 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
- dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
+ mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_HIGH\n");
break;
case EVENT_SNR_HIGH:
- dev_dbg(adapter->dev, "event: Beacon SNR_HIGH\n");
+ mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_HIGH\n");
break;
case EVENT_DATA_RSSI_LOW:
- dev_dbg(adapter->dev, "event: Data RSSI_LOW\n");
+ mwifiex_dbg(adapter, EVENT, "event: Data RSSI_LOW\n");
break;
case EVENT_DATA_SNR_LOW:
- dev_dbg(adapter->dev, "event: Data SNR_LOW\n");
+ mwifiex_dbg(adapter, EVENT, "event: Data SNR_LOW\n");
break;
case EVENT_DATA_RSSI_HIGH:
- dev_dbg(adapter->dev, "event: Data RSSI_HIGH\n");
+ mwifiex_dbg(adapter, EVENT, "event: Data RSSI_HIGH\n");
break;
case EVENT_DATA_SNR_HIGH:
- dev_dbg(adapter->dev, "event: Data SNR_HIGH\n");
+ mwifiex_dbg(adapter, EVENT, "event: Data SNR_HIGH\n");
break;
case EVENT_LINK_QUALITY:
- dev_dbg(adapter->dev, "event: Link Quality\n");
+ mwifiex_dbg(adapter, EVENT, "event: Link Quality\n");
break;
case EVENT_PRE_BEACON_LOST:
- dev_dbg(adapter->dev, "event: Pre-Beacon Lost\n");
+ mwifiex_dbg(adapter, EVENT, "event: Pre-Beacon Lost\n");
break;
case EVENT_IBSS_COALESCED:
- dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
+ mwifiex_dbg(adapter, EVENT, "event: IBSS_COALESCED\n");
ret = mwifiex_send_cmd(priv,
HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
case EVENT_ADDBA:
- dev_dbg(adapter->dev, "event: ADDBA Request\n");
+ mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
HostCmd_ACT_GEN_SET, 0,
adapter->event_body, false);
break;
case EVENT_DELBA:
- dev_dbg(adapter->dev, "event: DELBA Request\n");
+ mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n");
mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
break;
case EVENT_BA_STREAM_TIEMOUT:
- dev_dbg(adapter->dev, "event: BA Stream timeout\n");
+ mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n");
mwifiex_11n_ba_stream_timeout(priv,
(struct host_cmd_ds_11n_batimeout
*)
@@ -464,28 +467,31 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_AMSDU_AGGR_CTRL:
ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
- dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+ mwifiex_dbg(adapter, EVENT,
+ "event: AMSDU_AGGR_CTRL %d\n", ctrl);
adapter->tx_buf_size =
min_t(u16, adapter->curr_tx_buf_size, ctrl);
- dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
- adapter->tx_buf_size);
+ mwifiex_dbg(adapter, EVENT, "event: tx_buf_size %d\n",
+ adapter->tx_buf_size);
break;
case EVENT_WEP_ICV_ERR:
- dev_dbg(adapter->dev, "event: WEP ICV error\n");
+ mwifiex_dbg(adapter, EVENT, "event: WEP ICV error\n");
break;
case EVENT_BW_CHANGE:
- dev_dbg(adapter->dev, "event: BW Change\n");
+ mwifiex_dbg(adapter, EVENT, "event: BW Change\n");
break;
case EVENT_HOSTWAKE_STAIE:
- dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
+ mwifiex_dbg(adapter, EVENT,
+ "event: HOSTWAKE_STAIE %d\n", eventcause);
break;
case EVENT_REMAIN_ON_CHAN_EXPIRED:
- dev_dbg(adapter->dev, "event: Remain on channel expired\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: Remain on channel expired\n");
cfg80211_remain_on_channel_expired(&priv->wdev,
priv->roc_cfg.cookie,
&priv->roc_cfg.chan,
@@ -496,7 +502,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_CHANNEL_SWITCH_ANN:
- dev_dbg(adapter->dev, "event: Channel Switch Announcement\n");
+ mwifiex_dbg(adapter, EVENT, "event: Channel Switch Announcement\n");
priv->csa_expire_time =
jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
@@ -511,23 +517,23 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_TX_STATUS_REPORT:
- dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
mwifiex_parse_tx_status_event(priv, adapter->event_body);
break;
case EVENT_CHANNEL_REPORT_RDY:
- dev_dbg(adapter->dev, "event: Channel Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: Channel Report\n");
ret = mwifiex_11h_handle_chanrpt_ready(priv,
adapter->event_skb);
break;
case EVENT_RADAR_DETECTED:
- dev_dbg(adapter->dev, "event: Radar detected\n");
+ mwifiex_dbg(adapter, EVENT, "event: Radar detected\n");
ret = mwifiex_11h_handle_radar_detected(priv,
adapter->event_skb);
break;
default:
- dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
- eventcause);
+ mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
+ eventcause);
break;
}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 0599e41e253c..d8b7d9c20450 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -64,7 +64,10 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
*(cmd_queued->condition),
(12 * HZ));
if (status <= 0) {
- dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+ if (status == 0)
+ status = -ETIMEDOUT;
+ mwifiex_dbg(adapter, ERROR,
+ "cmd_wait_q terminated: %d\n", status);
mwifiex_cancel_all_pending_cmd(adapter);
return status;
}
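
The status == 0 remap above exists because of wait_event_interruptible_timeout()'s return convention; previously a 12-second command timeout bubbled up to callers as 0, which reads as success. A sketch of the convention (per include/linux/wait.h; wq and done stand in for the adapter's real wait queue and completion condition):

/* Sketch of the return convention being normalized above. */
static int example_wait(wait_queue_head_t *wq, bool *done)
{
	long t = wait_event_interruptible_timeout(*wq, *done, 12 * HZ);

	if (t == 0)
		return -ETIMEDOUT;	/* timed out, condition still false */
	if (t < 0)
		return t;		/* -ERESTARTSYS: interrupted by signal */
	return 0;			/* woken with t jiffies to spare */
}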
@@ -91,7 +94,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
old_pkt_filter = priv->curr_pkt_filter;
if (mcast_list->mode == MWIFIEX_PROMISC_MODE) {
- dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Enable Promiscuous mode\n");
priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
priv->curr_pkt_filter &=
~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
@@ -99,16 +103,16 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
/* Multicast */
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
- dev_dbg(priv->adapter->dev,
- "info: Enabling All Multicast!\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Enabling All Multicast!\n");
priv->curr_pkt_filter |=
HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
} else {
priv->curr_pkt_filter &=
~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
- dev_dbg(priv->adapter->dev,
- "info: Set multicast list=%d\n",
- mcast_list->num_multicast_addr);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Set multicast list=%d\n",
+ mcast_list->num_multicast_addr);
/* Send multicast addresses to firmware */
ret = mwifiex_send_cmd(priv,
HostCmd_CMD_MAC_MULTICAST_ADR,
@@ -116,9 +120,9 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
mcast_list, false);
}
}
- dev_dbg(priv->adapter->dev,
- "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
- old_pkt_filter, priv->curr_pkt_filter);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
+ old_pkt_filter, priv->curr_pkt_filter);
if (old_pkt_filter != priv->curr_pkt_filter) {
ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET,
@@ -151,7 +155,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
rcu_read_unlock();
if (!beacon_ie) {
- dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ " failed to alloc beacon_ie\n");
return -ENOMEM;
}
@@ -165,7 +170,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
bss_desc->bss_band = bss_priv->band;
bss_desc->fw_tsf = bss_priv->fw_tsf;
if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
- dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: InterpretIE: AP WEP enabled\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
} else {
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
@@ -219,8 +225,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) {
rcu_read_unlock();
- wiphy_dbg(priv->wdev.wiphy,
- "11D: skip setting domain info in FW\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "11D: skip setting domain info in FW\n");
return 0;
}
memcpy(priv->adapter->country_code, &country_ie[2], 2);
@@ -241,8 +247,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
HostCmd_ACT_GEN_SET, 0, NULL, false)) {
- wiphy_err(priv->adapter->wiphy,
- "11D: setting domain info in FW\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "11D: setting domain info in FW fail\n");
return -1;
}
@@ -304,14 +310,15 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
if (mwifiex_11h_get_csa_closed_channel(priv) ==
(u8)bss_desc->channel) {
- dev_err(adapter->dev,
- "Attempt to reconnect on csa closed chan(%d)\n",
- bss_desc->channel);
+ mwifiex_dbg(adapter, ERROR,
+ "Attempt to reconnect on csa closed chan(%d)\n",
+ bss_desc->channel);
goto done;
}
- dev_dbg(adapter->dev, "info: SSID found in scan list ... "
- "associating...\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: SSID found in scan list ...\t"
+ "associating...\n");
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
@@ -353,15 +360,17 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
netif_carrier_off(priv->netdev);
if (!ret) {
- dev_dbg(adapter->dev, "info: network found in scan"
- " list. Joining...\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: network found in scan\t"
+ " list. Joining...\n");
ret = mwifiex_adhoc_join(priv, bss_desc);
if (bss)
cfg80211_put_bss(priv->adapter->wiphy, bss);
} else {
- dev_dbg(adapter->dev, "info: Network not found in "
- "the list, creating adhoc with ssid = %s\n",
- req_ssid->ssid);
+ mwifiex_dbg(adapter, INFO,
+ "info: Network not found in\t"
+ "the list, creating adhoc with ssid = %s\n",
+ req_ssid->ssid);
ret = mwifiex_adhoc_start(priv, req_ssid);
}
}
@@ -396,8 +405,9 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
switch (action) {
case HostCmd_ACT_GEN_SET:
if (adapter->pps_uapsd_mode) {
- dev_dbg(adapter->dev, "info: Host Sleep IOCTL"
- " is blocked in UAPSD/PPS mode\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Host Sleep IOCTL\t"
+ "is blocked in UAPSD/PPS mode\n");
status = -1;
break;
}
@@ -494,7 +504,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
}
if (adapter->hs_activated) {
- dev_dbg(adapter->dev, "cmd: HS Already activated\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: HS Already activated\n");
return true;
}
@@ -510,14 +521,16 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
MWIFIEX_BSS_ROLE_STA),
HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
&hscfg)) {
- dev_err(adapter->dev, "IOCTL request HS enable failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "IOCTL request HS enable failed\n");
return false;
}
if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
adapter->hs_activate_wait_q_woken,
(10 * HZ)) <= 0) {
- dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
+ mwifiex_dbg(adapter, ERROR,
+ "hs_activate_wait_q terminated\n");
return false;
}
@@ -637,10 +650,11 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
dbm = (u16) power_cfg->power_level;
if ((dbm < priv->min_tx_power_level) ||
(dbm > priv->max_tx_power_level)) {
- dev_err(priv->adapter->dev, "txpower value %d dBm"
- " is out of range (%d dBm-%d dBm)\n",
- dbm, priv->min_tx_power_level,
- priv->max_tx_power_level);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "txpower value %d dBm\t"
+ "is out of range (%d dBm-%d dBm)\n",
+ dbm, priv->min_tx_power_level,
+ priv->max_tx_power_level);
return -1;
}
}
@@ -739,14 +753,15 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
{
if (ie_len) {
if (ie_len > sizeof(priv->wpa_ie)) {
- dev_err(priv->adapter->dev,
- "failed to copy WPA IE, too big\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "failed to copy WPA IE, too big\n");
return -1;
}
memcpy(priv->wpa_ie, ie_data_ptr, ie_len);
priv->wpa_ie_len = (u8) ie_len;
- dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
- priv->wpa_ie_len, priv->wpa_ie[0]);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: Set Wpa_ie_len=%d IE=%#x\n",
+ priv->wpa_ie_len, priv->wpa_ie[0]);
if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) {
priv->sec_info.wpa_enabled = true;
@@ -759,8 +774,9 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
} else {
memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie));
priv->wpa_ie_len = 0;
- dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n",
- priv->wpa_ie_len, priv->wpa_ie[0]);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: reset wpa_ie_len=%d IE=%#x\n",
+ priv->wpa_ie_len, priv->wpa_ie[0]);
priv->sec_info.wpa_enabled = false;
priv->sec_info.wpa2_enabled = false;
}
@@ -780,23 +796,24 @@ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv,
{
if (ie_len) {
if (ie_len > sizeof(priv->wapi_ie)) {
- dev_dbg(priv->adapter->dev,
- "info: failed to copy WAPI IE, too big\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: failed to copy WAPI IE, too big\n");
return -1;
}
memcpy(priv->wapi_ie, ie_data_ptr, ie_len);
priv->wapi_ie_len = ie_len;
- dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n",
- priv->wapi_ie_len, priv->wapi_ie[0]);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: Set wapi_ie_len=%d IE=%#x\n",
+ priv->wapi_ie_len, priv->wapi_ie[0]);
if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY)
priv->sec_info.wapi_enabled = true;
} else {
memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie));
priv->wapi_ie_len = ie_len;
- dev_dbg(priv->adapter->dev,
- "info: Reset wapi_ie_len=%d IE=%#x\n",
- priv->wapi_ie_len, priv->wapi_ie[0]);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Reset wapi_ie_len=%d IE=%#x\n",
+ priv->wapi_ie_len, priv->wapi_ie[0]);
priv->sec_info.wapi_enabled = false;
}
return 0;
@@ -814,8 +831,8 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
{
if (ie_len) {
if (ie_len > MWIFIEX_MAX_VSIE_LEN) {
- dev_dbg(priv->adapter->dev,
- "info: failed to copy WPS IE, too big\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: failed to copy WPS IE, too big\n");
return -1;
}
@@ -825,13 +842,14 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
memcpy(priv->wps_ie, ie_data_ptr, ie_len);
priv->wps_ie_len = ie_len;
- dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
- priv->wps_ie_len, priv->wps_ie[0]);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: Set wps_ie_len=%d IE=%#x\n",
+ priv->wps_ie_len, priv->wps_ie[0]);
} else {
kfree(priv->wps_ie);
priv->wps_ie_len = ie_len;
- dev_dbg(priv->adapter->dev,
- "info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
}
return 0;
}
@@ -875,8 +893,8 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
/* Copy the required key as the current key */
wep_key = &priv->wep_key[index];
if (!wep_key->key_length) {
- dev_err(adapter->dev,
- "key not set, so cannot enable it\n");
+ mwifiex_dbg(adapter, ERROR,
+ "key not set, so cannot enable it\n");
return -1;
}
@@ -953,7 +971,8 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
/* Current driver only supports key length of up to 32 bytes */
if (encrypt_key->key_len > WLAN_MAX_KEY_LEN) {
- dev_err(priv->adapter->dev, "key length too long\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "key length too long\n");
return -1;
}
@@ -1040,7 +1059,7 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
snprintf(version, max_len, driver_version, fw_ver);
- dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version);
+ mwifiex_dbg(adapter, MSG, "info: MWIFIEX VERSION: %s\n", version);
return 0;
}
@@ -1128,7 +1147,8 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN,
action, 0, &roc_cfg, true)) {
- dev_err(priv->adapter->dev, "failed to remain on channel\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "failed to remain on channel\n");
return -1;
}
@@ -1313,8 +1333,8 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) &&
(!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) {
priv->wps.session_enable = true;
- dev_dbg(priv->adapter->dev,
- "info: WPS Session Enabled.\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WPS Session Enabled.\n");
ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len);
}
@@ -1361,7 +1381,8 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter));
if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) {
adapter->arp_filter_size = 0;
- dev_err(adapter->dev, "invalid ARP filter size\n");
+ mwifiex_dbg(adapter, ERROR,
+ "invalid ARP filter size\n");
return -1;
} else {
memcpy(adapter->arp_filter, gen_ie->ie_data,
@@ -1370,7 +1391,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
}
break;
default:
- dev_err(adapter->dev, "invalid IE type\n");
+ mwifiex_dbg(adapter, ERROR, "invalid IE type\n");
return -1;
}
return 0;
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index b8729c9394e9..d4d4cb1ce95b 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -141,7 +141,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
if (priv->hs2_enabled &&
mwifiex_discard_gratuitous_arp(priv, skb)) {
- dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n");
+ mwifiex_dbg(priv->adapter, INFO, "Bypassed Gratuitous ARP\n");
dev_kfree_skb_any(skb);
return 0;
}
@@ -166,7 +166,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
ret = mwifiex_recv_packet(priv, skb);
if (ret == -1)
- dev_err(priv->adapter->dev, "recv packet failed\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "recv packet failed\n");
return ret;
}
@@ -203,9 +204,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
- dev_err(adapter->dev,
- "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
- skb->len, rx_pkt_offset, rx_pkt_length);
+ mwifiex_dbg(adapter, ERROR,
+ "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+ skb->len, rx_pkt_offset, rx_pkt_length);
priv->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return ret;
@@ -214,7 +215,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
if (rx_pkt_type == PKT_TYPE_MGMT) {
ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
- dev_err(adapter->dev, "Rx of mgmt packet failed");
+ mwifiex_dbg(adapter, ERROR, "Rx of mgmt packet failed");
dev_kfree_skb_any(skb);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 5ce2d9a4f919..355ac5904fac 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -53,7 +53,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
INTF_HEADER_LEN;
if (!skb->len) {
- dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "Tx: bad packet length: %d\n", skb->len);
tx_info->status_code = -1;
return skb->data;
}
@@ -184,21 +185,24 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
switch (ret) {
case -EBUSY:
dev_kfree_skb_any(skb);
- dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
- __func__, ret);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: host_to_card failed: ret=%d\n",
+ __func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
break;
case -1:
adapter->data_sent = false;
dev_kfree_skb_any(skb);
- dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
- __func__, ret);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: host_to_card failed: ret=%d\n",
+ __func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
break;
case 0:
dev_kfree_skb_any(skb);
- dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n",
- __func__);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: host_to_card succeeded\n",
+ __func__);
adapter->tx_lock_flag = true;
break;
case -EINPROGRESS:
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 087d84762cd3..2faa1bc42abe 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -37,7 +37,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
u32 tid;
u8 tid_down;
- dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
@@ -94,7 +94,7 @@ static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
unsigned long flags;
int i;
- dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
for (i = 0; i < MAX_NUM_TID; i++) {
@@ -132,8 +132,8 @@ mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
if (skb_tailroom(skb) < rates_size + 4) {
- dev_err(priv->adapter->dev,
- "Insuffient space while adding rates\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Insuffient space while adding rates\n");
return -ENOMEM;
}
@@ -199,8 +199,8 @@ mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac,
sta_ptr = mwifiex_get_sta_entry(priv, mac);
if (unlikely(!sta_ptr)) {
- dev_warn(priv->adapter->dev,
- "TDLS peer station not found in list\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS peer station not found in list\n");
return -1;
}
@@ -247,15 +247,16 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
sta_ptr = mwifiex_get_sta_entry(priv, mac);
if (unlikely(!sta_ptr)) {
- dev_warn(adapter->dev, "TDLS peer station not found in list\n");
+ mwifiex_dbg(adapter, ERROR,
+ "TDLS peer station not found in list\n");
return -1;
}
if (!mwifiex_is_bss_in_11ac_mode(priv)) {
if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
- dev_dbg(adapter->dev,
- "TDLS peer doesn't support wider bandwitdh\n");
+ mwifiex_dbg(adapter, WARN,
+ "TDLS peer doesn't support wider bandwidth\n");
return 0;
}
} else {
@@ -554,7 +555,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
tf->u.discover_req.dialog_token = dialog_token;
break;
default:
- dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
return -EINVAL;
}
@@ -608,8 +609,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
skb = dev_alloc_skb(skb_len);
if (!skb) {
- dev_err(priv->adapter->dev,
- "allocate skb failed for management frame\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "allocate skb failed for management frame\n");
return -ENOMEM;
}
skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
@@ -742,7 +743,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
mwifiex_tdls_add_qos_capab(skb);
break;
default:
- dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS action frame type\n");
return -EINVAL;
}
@@ -781,8 +782,8 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
skb = dev_alloc_skb(skb_len);
if (!skb) {
- dev_err(priv->adapter->dev,
- "allocate skb failed for management frame\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "allocate skb failed for management frame\n");
return -ENOMEM;
}
@@ -848,8 +849,8 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
peer = buf + ETH_ALEN;
action = *(buf + sizeof(struct ethhdr) + 2);
- dev_dbg(priv->adapter->dev,
- "rx:tdls action: peer=%pM, action=%d\n", peer, action);
+ mwifiex_dbg(priv->adapter, DATA,
+ "rx:tdls action: peer=%pM, action=%d\n", peer, action);
switch (action) {
case WLAN_TDLS_SETUP_REQUEST:
@@ -880,7 +881,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
break;
default:
- dev_dbg(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
return;
}
@@ -967,8 +968,8 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
- dev_err(priv->adapter->dev,
- "link absent for peer %pM; cannot config\n", peer);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "link absent for peer %pM; cannot config\n", peer);
return -EINVAL;
}
@@ -988,8 +989,8 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
- dev_dbg(priv->adapter->dev,
- "Setup already in progress for peer %pM\n", peer);
+ mwifiex_dbg(priv->adapter, WARN,
+ "Setup already in progress for peer %pM\n", peer);
return 0;
}
@@ -1046,8 +1047,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
- dev_dbg(priv->adapter->dev,
- "tdls: enable link %pM success\n", peer);
+ mwifiex_dbg(priv->adapter, MSG,
+ "tdls: enable link %pM success\n", peer);
sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
@@ -1076,8 +1077,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
mwifiex_auto_tdls_update_peer_status(priv, peer,
TDLS_SETUP_COMPLETE);
} else {
- dev_dbg(priv->adapter->dev,
- "tdls: enable link %pM failed\n", peer);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls: enable link %pM failed\n", peer);
if (sta_ptr) {
mwifiex_11n_cleanup_reorder_tbl(priv);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
@@ -1180,9 +1181,9 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
HostCmd_ACT_GEN_SET, 0, &tdls_oper, false))
- dev_warn(priv->adapter->dev,
- "Disable link failed for TDLS peer %pM",
- sta_ptr->mac_addr);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Disable link failed for TDLS peer %pM",
+ sta_ptr->mac_addr);
}
mwifiex_del_all_sta_list(priv);
@@ -1204,9 +1205,9 @@ int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
(peer->failure_count <
MWIFIEX_TDLS_MAX_FAIL_COUNT)) {
peer->tdls_status = TDLS_SETUP_INPROGRESS;
- dev_dbg(priv->adapter->dev,
- "setup TDLS link, peer=%pM rssi=%d\n",
- peer->mac_addr, peer->rssi);
+ mwifiex_dbg(priv->adapter, INFO,
+ "setup TDLS link, peer=%pM rssi=%d\n",
+ peer->mac_addr, peer->rssi);
cfg80211_tdls_oper_request(priv->netdev,
peer->mac_addr,
@@ -1272,8 +1273,8 @@ void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac)
tdls_peer->rssi_jiffies = jiffies;
INIT_LIST_HEAD(&tdls_peer->list);
list_add_tail(&tdls_peer->list, &priv->auto_tdls_list);
- dev_dbg(priv->adapter->dev, "Add auto TDLS peer= %pM to list\n",
- mac);
+ mwifiex_dbg(priv->adapter, INFO,
+ "Add auto TDLS peer= %pM to list\n", mac);
}
spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
@@ -1341,8 +1342,8 @@ void mwifiex_check_auto_tdls(unsigned long context)
return;
if (!priv->auto_tdls_timer_active) {
- dev_dbg(priv->adapter->dev,
- "auto TDLS timer inactive; return");
+ mwifiex_dbg(priv->adapter, INFO,
+ "auto TDLS timer inactive; return");
return;
}
@@ -1368,9 +1369,9 @@ void mwifiex_check_auto_tdls(unsigned long context)
!tdls_peer->rssi) &&
tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
- dev_dbg(priv->adapter->dev,
- "teardown TDLS link,peer=%pM rssi=%d\n",
- tdls_peer->mac_addr, -tdls_peer->rssi);
+ mwifiex_dbg(priv->adapter, MSG,
+ "teardown TDLS link,peer=%pM rssi=%d\n",
+ tdls_peer->mac_addr, -tdls_peer->rssi);
tdls_peer->do_discover = true;
priv->check_tdls_tx = true;
cfg80211_tdls_oper_request(priv->netdev,
@@ -1384,9 +1385,10 @@ void mwifiex_check_auto_tdls(unsigned long context)
MWIFIEX_TDLS_MAX_FAIL_COUNT) {
priv->check_tdls_tx = true;
tdls_peer->do_setup = true;
- dev_dbg(priv->adapter->dev,
- "check TDLS with peer=%pM rssi=%d\n",
- tdls_peer->mac_addr, -tdls_peer->rssi);
+ mwifiex_dbg(priv->adapter, INFO,
+ "check TDLS with peer=%pM\t"
+ "rssi=%d\n", tdls_peer->mac_addr,
+ tdls_peer->rssi);
}
}
spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index a245f444aeec..28dcc84a34d2 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -50,11 +50,15 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
if (!priv) {
- dev_err(adapter->dev, "data: priv not found. Drop RX packet\n");
+ mwifiex_dbg(adapter, ERROR,
+ "data: priv not found. Drop RX packet\n");
dev_kfree_skb_any(skb);
return -1;
}
+ mwifiex_dbg_dump(adapter, DAT_D, "rx pkt:", skb->data,
+ min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
+
memset(rx_info, 0, sizeof(*rx_info));
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
@@ -112,10 +116,12 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
skb, tx_param);
}
}
+ mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data,
+ min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
switch (ret) {
case -ENOSR:
- dev_dbg(adapter->dev, "data: -ENOSR is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n");
break;
case -EBUSY:
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -124,13 +130,14 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
if (local_tx_pd)
local_tx_pd->flags = 0;
}
- dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
break;
case -1:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
- dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
- ret);
+ mwifiex_dbg(adapter, ERROR,
+ "mwifiex_write_data_async failed: 0x%X\n",
+ ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, ret);
break;
@@ -162,7 +169,8 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
tx_info->bss_type);
if (!priv) {
- dev_err(adapter->dev, "data: priv not found. Drop TX packet\n");
+ mwifiex_dbg(adapter, ERROR,
+ "data: priv not found. Drop TX packet\n");
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, 0);
return ret;
@@ -187,7 +195,7 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
}
switch (ret) {
case -ENOSR:
- dev_err(adapter->dev, "data: -ENOSR is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n");
break;
case -EBUSY:
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -202,13 +210,13 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
atomic_add(tx_info->aggr_num, &adapter->tx_queued);
else
atomic_inc(&adapter->tx_queued);
- dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
break;
case -1:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
- dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
- ret);
+ mwifiex_dbg(adapter, ERROR,
+ "mwifiex_write_data_async failed: 0x%X\n", ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, ret);
break;
@@ -319,7 +327,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
txq = netdev_get_tx_queue(priv->netdev, index);
if (netif_tx_queue_stopped(txq)) {
netif_tx_wake_queue(txq);
- dev_dbg(adapter->dev, "wake queue: %d\n", index);
+ mwifiex_dbg(adapter, DATA, "wake queue: %d\n", index);
}
}
done:
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index f5c2af01ba0a..a4ae28353b6d 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -167,7 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (ht_ie) {
- memcpy(&bss_cfg->ht_cap, ht_ie,
+ memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
memset(&bss_cfg->ht_cap.mcs, 0,
@@ -184,8 +184,8 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
default:
- dev_warn(priv->adapter->dev,
- "Unsupported RX-STBC, default to 2x2\n");
+ mwifiex_dbg(priv->adapter, WARN,
+ "Unsupported RX-STBC, default to 2x2\n");
bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
@@ -767,8 +767,8 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
break;
default:
- dev_err(priv->adapter->dev,
- "PREP_CMD: unknown cmd %#x\n", cmd_no);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "PREP_CMD: unknown cmd %#x\n", cmd_no);
return -1;
}
@@ -806,24 +806,28 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg)
{
if (mwifiex_del_mgmt_ies(priv))
- dev_err(priv->adapter->dev, "Failed to delete mgmt IEs!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to delete mgmt IEs!\n");
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL, true)) {
- dev_err(priv->adapter->dev, "Failed to stop the BSS\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to stop the BSS\n");
return -1;
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_BSS_PARAMS_I, bss_cfg, false)) {
- dev_err(priv->adapter->dev, "Failed to set the SSID\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to set the SSID\n");
return -1;
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
HostCmd_ACT_GEN_SET, 0, NULL, false)) {
- dev_err(priv->adapter->dev, "Failed to start the BSS\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to start the BSS\n");
return -1;
}
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index f4794cdc36d2..06ce3fe660f1 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -80,8 +80,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
node = mwifiex_add_sta_entry(priv, event->sta_addr);
if (!node) {
- dev_warn(adapter->dev,
- "could not create station entry!\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not create station entry!\n");
return -1;
}
@@ -128,7 +128,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
break;
case EVENT_UAP_BSS_START:
- dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ mwifiex_dbg(adapter, EVENT,
+ "AP EVENT: event id: %#x\n", eventcause);
memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
ETH_ALEN);
if (priv->hist_data)
@@ -136,50 +137,53 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_UAP_MIC_COUNTERMEASURES:
/* For future development */
- dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ mwifiex_dbg(adapter, EVENT,
+ "AP EVENT: event id: %#x\n", eventcause);
break;
case EVENT_AMSDU_AGGR_CTRL:
ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
- dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+ mwifiex_dbg(adapter, EVENT,
+ "event: AMSDU_AGGR_CTRL %d\n", ctrl);
if (priv->media_connected) {
adapter->tx_buf_size =
min_t(u16, adapter->curr_tx_buf_size, ctrl);
- dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
- adapter->tx_buf_size);
+ mwifiex_dbg(adapter, EVENT,
+ "event: tx_buf_size %d\n",
+ adapter->tx_buf_size);
}
break;
case EVENT_ADDBA:
- dev_dbg(adapter->dev, "event: ADDBA Request\n");
+ mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
if (priv->media_connected)
mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
HostCmd_ACT_GEN_SET, 0,
adapter->event_body, false);
break;
case EVENT_DELBA:
- dev_dbg(adapter->dev, "event: DELBA Request\n");
+ mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n");
if (priv->media_connected)
mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
break;
case EVENT_BA_STREAM_TIEMOUT:
- dev_dbg(adapter->dev, "event: BA Stream timeout\n");
+ mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n");
if (priv->media_connected) {
ba_timeout = (void *)adapter->event_body;
mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
}
break;
case EVENT_EXT_SCAN_REPORT:
- dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
if (adapter->ext_scan)
return mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
break;
case EVENT_TX_STATUS_REPORT:
- dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
mwifiex_parse_tx_status_event(priv, adapter->event_body);
break;
case EVENT_PS_SLEEP:
- dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
+ mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n");
adapter->ps_state = PS_STATE_PRE_SLEEP;
@@ -187,12 +191,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_PS_AWAKE:
- dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
+ mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
if (!adapter->pps_uapsd_mode &&
priv->media_connected && adapter->sleep_period.period) {
adapter->pps_uapsd_mode = true;
- dev_dbg(adapter->dev,
- "event: PPS/UAPSD mode activated\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: PPS/UAPSD mode activated\n");
}
adapter->tx_lock_flag = false;
if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -218,16 +222,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_CHANNEL_REPORT_RDY:
- dev_dbg(adapter->dev, "event: Channel Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: Channel Report\n");
mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb);
break;
case EVENT_RADAR_DETECTED:
- dev_dbg(adapter->dev, "event: Radar detected\n");
+ mwifiex_dbg(adapter, EVENT, "event: Radar detected\n");
mwifiex_11h_handle_radar_detected(priv, adapter->event_skb);
break;
default:
- dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
- eventcause);
+ mwifiex_dbg(adapter, EVENT,
+ "event: unknown event id: %#x\n", eventcause);
break;
}
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 38ac4d74c486..61c52fdf945d 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -103,8 +103,8 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
if ((atomic_read(&adapter->pending_bridged_pkts) >=
MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
- dev_err(priv->adapter->dev,
- "Tx: Bridge packet limit reached. Drop packet!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Tx: Bridge packet limit reached. Drop packet!\n");
kfree_skb(skb);
mwifiex_uap_cleanup_tx_queues(priv);
return;
@@ -153,15 +153,15 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
skb_pull(skb, hdr_chop);
if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
- dev_dbg(priv->adapter->dev,
- "data: Tx: insufficient skb headroom %d\n",
- skb_headroom(skb));
+ mwifiex_dbg(priv->adapter, ERROR,
+ "data: Tx: insufficient skb headroom %d\n",
+ skb_headroom(skb));
/* Insufficient skb headroom - allocate a new skb */
new_skb =
skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
if (unlikely(!new_skb)) {
- dev_err(priv->adapter->dev,
- "Tx: cannot allocate new_skb\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Tx: cannot allocate new_skb\n");
kfree_skb(skb);
priv->stats.tx_dropped++;
return;
@@ -169,8 +169,9 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
kfree_skb(skb);
skb = new_skb;
- dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
- skb_headroom(skb));
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: new skb headroom %d\n",
+ skb_headroom(skb));
}
tx_info = MWIFIEX_SKB_TXCB(skb);
@@ -225,7 +226,8 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
/* don't do packet forwarding in disconnected state */
if (!priv->media_connected) {
- dev_err(adapter->dev, "drop packet in disconnected state.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "drop packet in disconnected state.\n");
dev_kfree_skb_any(skb);
return 0;
}
@@ -275,10 +277,10 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
- dev_err(adapter->dev,
- "wrong rx packet: len=%d, offset=%d, length=%d\n",
- skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
- le16_to_cpu(uap_rx_pd->rx_pkt_length));
+ mwifiex_dbg(adapter, ERROR,
+ "wrong rx packet: len=%d, offset=%d, length=%d\n",
+ skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
+ le16_to_cpu(uap_rx_pd->rx_pkt_length));
priv->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return 0;
@@ -287,7 +289,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
if (rx_pkt_type == PKT_TYPE_MGMT) {
ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
- dev_err(adapter->dev, "Rx of mgmt packet failed");
+ mwifiex_dbg(adapter, ERROR,
+ "Rx of mgmt packet failed");
dev_kfree_skb_any(skb);
return ret;
}
@@ -354,7 +357,8 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
INTF_HEADER_LEN;
if (!skb->len) {
- dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "Tx: bad packet length: %d\n", skb->len);
tx_info->status_code = -1;
return skb->data;
}
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index fd8027f200a0..aada93425f80 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -60,7 +60,6 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size);
static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
struct sk_buff *skb, u8 ep)
{
- struct device *dev = adapter->dev;
u32 recv_type;
__le32 tmp;
int ret;
@@ -69,13 +68,15 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
mwifiex_process_hs_config(adapter);
if (skb->len < INTF_HEADER_LEN) {
- dev_err(dev, "%s: invalid skb->len\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: invalid skb->len\n", __func__);
return -1;
}
switch (ep) {
case MWIFIEX_USB_EP_CMD_EVENT:
- dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__);
+ mwifiex_dbg(adapter, EVENT,
+ "%s: EP_CMD_EVENT\n", __func__);
skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
recv_type = le32_to_cpu(tmp);
skb_pull(skb, INTF_HEADER_LEN);
@@ -83,11 +84,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
switch (recv_type) {
case MWIFIEX_USB_TYPE_CMD:
if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
- dev_err(dev, "CMD: skb->len too large\n");
+ mwifiex_dbg(adapter, ERROR,
+ "CMD: skb->len too large\n");
ret = -1;
goto exit_restore_skb;
} else if (!adapter->curr_cmd) {
- dev_dbg(dev, "CMD: no curr_cmd\n");
+ mwifiex_dbg(adapter, WARN, "CMD: no curr_cmd\n");
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(
adapter, skb->data,
@@ -104,16 +106,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
break;
case MWIFIEX_USB_TYPE_EVENT:
if (skb->len < sizeof(u32)) {
- dev_err(dev, "EVENT: skb->len too small\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EVENT: skb->len too small\n");
ret = -1;
goto exit_restore_skb;
}
skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
adapter->event_cause = le32_to_cpu(tmp);
- dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
+ mwifiex_dbg(adapter, EVENT,
+ "event_cause %#x\n", adapter->event_cause);
if (skb->len > MAX_EVENT_SIZE) {
- dev_err(dev, "EVENT: event body too large\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EVENT: event body too large\n");
ret = -1;
goto exit_restore_skb;
}
@@ -125,14 +130,16 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
adapter->event_skb = skb;
break;
default:
- dev_err(dev, "unknown recv_type %#x\n", recv_type);
+ mwifiex_dbg(adapter, ERROR,
+ "unknown recv_type %#x\n", recv_type);
return -1;
}
break;
case MWIFIEX_USB_EP_DATA:
- dev_dbg(dev, "%s: EP_DATA\n", __func__);
+ mwifiex_dbg(adapter, DATA, "%s: EP_DATA\n", __func__);
if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
- dev_err(dev, "DATA: skb->len too large\n");
+ mwifiex_dbg(adapter, ERROR,
+ "DATA: skb->len too large\n");
return -1;
}
@@ -141,7 +148,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
atomic_inc(&adapter->rx_pending);
break;
default:
- dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: unknown endport %#x\n", __func__, ep);
return -1;
}
@@ -176,8 +184,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
if (recv_length) {
if (urb->status || (adapter->surprise_removed)) {
- dev_err(adapter->dev,
- "URB status is failed: %d\n", urb->status);
+ mwifiex_dbg(adapter, ERROR,
+ "URB status is failed: %d\n", urb->status);
/* Do not free skb in case of command ep */
if (card->rx_cmd_ep != context->ep)
dev_kfree_skb_any(skb);
@@ -190,8 +198,9 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
status = mwifiex_usb_recv(adapter, skb, context->ep);
- dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
- recv_length, status);
+ mwifiex_dbg(adapter, INFO,
+ "info: recv_length=%d, status=%d\n",
+ recv_length, status);
if (status == -EINPROGRESS) {
mwifiex_queue_main_work(adapter);
@@ -203,8 +212,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
return;
} else {
if (status == -1)
- dev_err(adapter->dev,
- "received data processing failed!\n");
+ mwifiex_dbg(adapter, ERROR,
+ "received data processing failed!\n");
/* Do not free skb in case of command ep */
if (card->rx_cmd_ep != context->ep)
@@ -212,8 +221,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
}
} else if (urb->status) {
if (!adapter->is_suspended) {
- dev_warn(adapter->dev,
- "Card is removed: %d\n", urb->status);
+ mwifiex_dbg(adapter, FATAL,
+ "Card is removed: %d\n", urb->status);
adapter->surprise_removed = true;
}
dev_kfree_skb_any(skb);
@@ -249,14 +258,17 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
struct mwifiex_adapter *adapter = context->adapter;
struct usb_card_rec *card = adapter->card;
- dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status);
+ mwifiex_dbg(adapter, INFO,
+ "%s: status: %d\n", __func__, urb->status);
if (context->ep == card->tx_cmd_ep) {
- dev_dbg(adapter->dev, "%s: CMD\n", __func__);
+ mwifiex_dbg(adapter, CMD,
+ "%s: CMD\n", __func__);
atomic_dec(&card->tx_cmd_urb_pending);
adapter->cmd_sent = false;
} else {
- dev_dbg(adapter->dev, "%s: DATA\n", __func__);
+ mwifiex_dbg(adapter, DATA,
+ "%s: DATA\n", __func__);
atomic_dec(&card->tx_data_urb_pending);
mwifiex_write_data_complete(adapter, context->skb, 0,
urb->status ? -1 : 0);
@@ -275,8 +287,8 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
if (card->rx_cmd_ep != ctx->ep) {
ctx->skb = dev_alloc_skb(size);
if (!ctx->skb) {
- dev_err(adapter->dev,
- "%s: dev_alloc_skb failed\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: dev_alloc_skb failed\n", __func__);
return -ENOMEM;
}
}
@@ -291,7 +303,7 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
atomic_inc(&card->rx_data_urb_pending);
if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) {
- dev_err(adapter->dev, "usb_submit_urb failed\n");
+ mwifiex_dbg(adapter, ERROR, "usb_submit_urb failed\n");
dev_kfree_skb_any(ctx->skb);
ctx->skb = NULL;
@@ -468,7 +480,8 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
adapter = card->adapter;
if (unlikely(adapter->is_suspended))
- dev_warn(adapter->dev, "Device already suspended\n");
+ mwifiex_dbg(adapter, WARN,
+ "Device already suspended\n");
mwifiex_enable_hs(adapter);
@@ -519,7 +532,8 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
adapter = card->adapter;
if (unlikely(!adapter->is_suspended)) {
- dev_warn(adapter->dev, "Device already resumed\n");
+ mwifiex_dbg(adapter, WARN,
+ "Device already resumed\n");
return 0;
}
@@ -578,7 +592,8 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
mwifiex_usb_free(card);
- dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+ mwifiex_dbg(adapter, FATAL,
+ "%s: removing card\n", __func__);
mwifiex_remove_card(adapter, &add_remove_card_sem);
usb_set_intfdata(intf, NULL);
@@ -608,7 +623,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
if (!card->tx_cmd.urb) {
- dev_err(adapter->dev, "tx_cmd.urb allocation failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "tx_cmd.urb allocation failed\n");
return -ENOMEM;
}
@@ -620,8 +636,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (!card->tx_data_list[i].urb) {
- dev_err(adapter->dev,
- "tx_data_list[] urb allocation failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "tx_data_list[] urb allocation failed\n");
return -ENOMEM;
}
}
@@ -639,15 +655,13 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
if (!card->rx_cmd.urb) {
- dev_err(adapter->dev, "rx_cmd.urb allocation failed\n");
+ mwifiex_dbg(adapter, ERROR, "rx_cmd.urb allocation failed\n");
return -ENOMEM;
}
card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
- if (!card->rx_cmd.skb) {
- dev_err(adapter->dev, "rx_cmd.skb allocation failed\n");
+ if (!card->rx_cmd.skb)
return -ENOMEM;
- }
if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE))
return -1;
@@ -658,8 +672,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (!card->rx_data_list[i].urb) {
- dev_err(adapter->dev,
- "rx_data_list[] urb allocation failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "rx_data_list[] urb allocation failed\n");
return -1;
}
if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
@@ -683,7 +697,8 @@ static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf,
*len, &actual_length, timeout);
if (ret) {
- dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "usb_bulk_msg for tx failed: %d\n", ret);
return ret;
}
@@ -702,7 +717,8 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf,
*len, &actual_length, timeout);
if (ret) {
- dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "usb_bulk_msg for rx failed: %d\n", ret);
return ret;
}
@@ -722,13 +738,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
struct urb *tx_urb;
if (adapter->is_suspended) {
- dev_err(adapter->dev,
- "%s: not allowed while suspended\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: not allowed while suspended\n", __func__);
return -1;
}
if (adapter->surprise_removed) {
- dev_err(adapter->dev, "%s: device removed\n", __func__);
+ mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
return -1;
}
@@ -737,7 +753,7 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
return -EBUSY;
}
- dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep);
+ mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep);
if (ep == card->tx_cmd_ep) {
context = &card->tx_cmd;
@@ -764,7 +780,8 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
atomic_inc(&card->tx_data_urb_pending);
if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
- dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: usb_submit_urb failed\n", __func__);
if (ep == card->tx_cmd_ep) {
atomic_dec(&card->tx_cmd_urb_pending);
} else {
@@ -843,8 +860,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
u8 check_winner = 1;
if (!firmware) {
- dev_err(adapter->dev,
- "No firmware image found! Terminating download\n");
+ mwifiex_dbg(adapter, ERROR,
+ "No firmware image found! Terminating download\n");
ret = -1;
goto fw_exit;
}
@@ -889,8 +906,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
MWIFIEX_USB_EP_CMD_EVENT,
MWIFIEX_USB_TIMEOUT);
if (ret) {
- dev_err(adapter->dev,
- "write_data_sync: failed: %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "write_data_sync: failed: %d\n",
+ ret);
continue;
}
@@ -902,8 +920,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
MWIFIEX_USB_EP_CMD_EVENT,
MWIFIEX_USB_TIMEOUT);
if (ret) {
- dev_err(adapter->dev,
- "read_data_sync: failed: %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "read_data_sync: failed: %d\n",
+ ret);
continue;
}
@@ -913,17 +932,17 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* check 1st firmware block resp for highest bit set */
if (check_winner) {
if (le32_to_cpu(sync_fw.cmd) & 0x80000000) {
- dev_warn(adapter->dev,
- "USB is not the winner %#x\n",
- sync_fw.cmd);
+ mwifiex_dbg(adapter, WARN,
+ "USB is not the winner %#x\n",
+ sync_fw.cmd);
/* returning success */
ret = 0;
goto cleanup;
}
- dev_dbg(adapter->dev,
- "USB is the winner, start to download FW\n");
+ mwifiex_dbg(adapter, MSG,
+ "start to download FW...\n");
check_winner = 0;
break;
@@ -931,9 +950,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* check the firmware block response for CRC errors */
if (sync_fw.cmd) {
- dev_err(adapter->dev,
- "FW received block with CRC %#x\n",
- sync_fw.cmd);
+ mwifiex_dbg(adapter, ERROR,
+ "FW received block with CRC %#x\n",
+ sync_fw.cmd);
ret = -1;
continue;
}
@@ -945,8 +964,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
} while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);
cleanup:
- dev_notice(adapter->dev,
- "info: FW download over, size %d bytes\n", tlen);
+ mwifiex_dbg(adapter, MSG,
+ "info: FW download over, size %d bytes\n", tlen);
kfree(recv_buff);
kfree(fwdata);
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index b8a45872354d..370323a47ecb 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -26,6 +26,8 @@
#include "11n.h"
static struct mwifiex_debug_data items[] = {
+ {"debug_mask", item_size(debug_mask),
+ item_addr(debug_mask), 1},
{"int_counter", item_size(int_counter),
item_addr(int_counter), 1},
{"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
@@ -158,7 +160,8 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
} else if (func_init_shutdown == MWIFIEX_FUNC_SHUTDOWN) {
cmd = HostCmd_CMD_FUNC_SHUTDOWN;
} else {
- dev_err(priv->adapter->dev, "unsupported parameter\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "unsupported parameter\n");
return -1;
}
@@ -178,6 +181,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
if (info) {
+ info->debug_mask = adapter->debug_mask;
memcpy(info->packets_out,
priv->wmm.packets_out,
sizeof(priv->wmm.packets_out));
@@ -336,9 +340,9 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
action_code = *(payload + sizeof(struct ieee80211_hdr) + 1);
if (category == WLAN_CATEGORY_PUBLIC &&
action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
- dev_dbg(priv->adapter->dev,
- "TDLS discovery response %pM nf=%d, snr=%d\n",
- ieee_hdr->addr2, rx_pd->nf, rx_pd->snr);
+ mwifiex_dbg(priv->adapter, INFO,
+ "TDLS discovery response %pM nf=%d, snr=%d\n",
+ ieee_hdr->addr2, rx_pd->nf, rx_pd->snr);
mwifiex_auto_tdls_update_peer_signal(priv,
ieee_hdr->addr2,
rx_pd->snr,
@@ -346,8 +350,8 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
}
break;
default:
- dev_dbg(priv->adapter->dev,
- "unknown mgmt frame subytpe %#x\n", stype);
+ mwifiex_dbg(priv->adapter, INFO,
+ "unknown mgmt frame subtype %#x\n", stype);
}
return 0;
@@ -369,8 +373,8 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
if (!priv->mgmt_frame_mask ||
priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) {
- dev_dbg(priv->adapter->dev,
- "do not receive mgmt frames on uninitialized intf");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "do not receive mgmt frames on uninitialized intf");
return -1;
}
@@ -464,13 +468,14 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
- dev_dbg(adapter->dev, "cmd completed: status=%d\n",
- adapter->cmd_wait_q.status);
+ mwifiex_dbg(adapter, CMD,
+ "cmd completed: status=%d\n",
+ adapter->cmd_wait_q.status);
*(cmd_node->condition) = true;
if (adapter->cmd_wait_q.status == -ETIMEDOUT)
- dev_err(adapter->dev, "cmd timeout\n");
+ mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
else
wake_up_interruptible(&adapter->cmd_wait_q.wait);
@@ -536,13 +541,16 @@ void
mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
int ies_len, struct mwifiex_sta_node *node)
{
+ struct ieee_types_header *ht_cap_ie;
const struct ieee80211_ht_cap *ht_cap;
if (!ies)
return;
- ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
- if (ht_cap) {
+ ht_cap_ie = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies,
+ ies_len);
+ if (ht_cap_ie) {
+ ht_cap = (void *)(ht_cap_ie + 1);
node->is_11n_enabled = 1;
node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
IEEE80211_HT_CAP_MAX_AMSDU ?
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index b2e99569a0f8..a8ea21c3340c 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -107,7 +107,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
ra_list->total_pkt_count = 0;
- dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
+ mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);
return ra_list;
}
@@ -150,7 +150,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
- dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);
+ mwifiex_dbg(adapter, INFO,
+ "info: created ra_list %p\n", ra_list);
if (!ra_list)
break;
@@ -178,8 +179,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
- dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
- ra_list, ra_list->is_11n_enabled);
+ mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
+ ra_list, ra_list->is_11n_enabled);
if (ra_list->is_11n_enabled) {
ra_list->ba_pkt_count = 0;
@@ -241,11 +242,12 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
return;
}
- dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
- "qos_info Parameter Set Count=%d, Reserved=%#x\n",
- wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
- IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
- wmm_ie->reserved);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM Parameter IE: version=%d,\t"
+ "qos_info Parameter Set Count=%d, Reserved=%#x\n",
+ wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
+ IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
+ wmm_ie->reserved);
for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
@@ -257,10 +259,10 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
priv->wmm.queue_priority[ac_idx] = ac_idx;
tmp[ac_idx] = avg_back_off;
- dev_dbg(priv->adapter->dev,
- "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
- (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
- cw_min, avg_back_off);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
+ (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
+ cw_min, avg_back_off);
mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
}
@@ -333,8 +335,8 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
int ac_val;
- dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:"
- "BK(0), BE(1), VI(2), VO(3)\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
+ "BK(0), BE(1), VI(2), VO(3)\n");
if (!priv->wmm_enabled) {
/* WMM is not enabled, default priorities */
@@ -346,9 +348,10 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
priv->wmm.ac_down_graded_vals[ac_val]
= mwifiex_wmm_eval_downgrade_ac(priv,
(enum mwifiex_wmm_ac_e) ac_val);
- dev_dbg(priv->adapter->dev,
- "info: WMM: AC PRIO %d maps to %d\n",
- ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM: AC PRIO %d maps to %d\n",
+ ac_val,
+ priv->wmm.ac_down_graded_vals[ac_val]);
}
}
}
@@ -428,6 +431,15 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->tos_to_tid_inv[i];
}
+ priv->aggr_prio_tbl[6].amsdu
+ = priv->aggr_prio_tbl[6].ampdu_ap
+ = priv->aggr_prio_tbl[6].ampdu_user
+ = BA_STREAM_NOT_ALLOWED;
+
+ priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
+ = priv->aggr_prio_tbl[7].ampdu_user
+ = BA_STREAM_NOT_ALLOWED;
+
mwifiex_set_ba_params(priv);
mwifiex_reset_11n_rx_seq_num(priv);
@@ -512,8 +524,8 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
int i;
for (i = 0; i < MAX_NUM_TID; ++i) {
- dev_dbg(priv->adapter->dev,
- "info: ra_list: freeing buf for tid %d\n", i);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ra_list: freeing buf for tid %d\n", i);
list_for_each_entry_safe(ra_list, tmp_node,
&priv->wmm.tid_tbl_ptr[i].ra_list,
list) {
@@ -685,14 +697,15 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
- dev_dbg(adapter->dev,
- "TDLS setup packet for %pM. Don't block\n", ra);
+ mwifiex_dbg(adapter, DATA,
+ "TDLS setup packet for %pM.\t"
+ "Don't block\n", ra);
else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
tdls_status = mwifiex_get_tdls_link_status(priv, ra);
}
if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
- dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
+ mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
@@ -773,6 +786,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
{
u8 *curr = (u8 *) &resp->params.get_wmm_status;
uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
+ int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
bool valid = true;
struct mwifiex_ie_types_data *tlv_hdr;
@@ -780,8 +794,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
struct mwifiex_wmm_ac_status *ac_status;
- dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
- resp_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
+ resp_len);
while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
tlv_hdr = (struct mwifiex_ie_types_data *) curr;
@@ -795,12 +810,12 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
tlv_wmm_qstatus =
(struct mwifiex_ie_types_wmm_queue_status *)
tlv_hdr;
- dev_dbg(priv->adapter->dev,
- "info: CMD_RESP: WMM_GET_STATUS:"
- " QSTATUS TLV: %d, %d, %d\n",
- tlv_wmm_qstatus->queue_index,
- tlv_wmm_qstatus->flow_required,
- tlv_wmm_qstatus->disabled);
+ mwifiex_dbg(priv->adapter, CMD,
+ "info: CMD_RESP: WMM_GET_STATUS:\t"
+ "QSTATUS TLV: %d, %d, %d\n",
+ tlv_wmm_qstatus->queue_index,
+ tlv_wmm_qstatus->flow_required,
+ tlv_wmm_qstatus->disabled);
ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
queue_index];
@@ -823,11 +838,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
wmm_param_ie->vend_hdr.element_id =
WLAN_EID_VENDOR_SPECIFIC;
- dev_dbg(priv->adapter->dev,
- "info: CMD_RESP: WMM_GET_STATUS:"
- " WMM Parameter Set Count: %d\n",
- wmm_param_ie->qos_info_bitmap &
- IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);
+ mwifiex_dbg(priv->adapter, CMD,
+ "info: CMD_RESP: WMM_GET_STATUS:\t"
+ "WMM Parameter Set Count: %d\n",
+ wmm_param_ie->qos_info_bitmap & mask);
memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
wmm_ie, wmm_param_ie,
@@ -875,9 +889,9 @@ mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
if (!wmm_ie)
return 0;
- dev_dbg(priv->adapter->dev,
- "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
- wmm_ie->vend_hdr.element_id);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
+ wmm_ie->vend_hdr.element_id);
if ((priv->wmm_required ||
(ht_cap && (priv->adapter->config_bands & BAND_GN ||
@@ -927,8 +941,8 @@ mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
*/
ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
- dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
- " %d ms sent to FW\n", queue_delay, ret_val);
+ mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
+ "%d ms sent to FW\n", queue_delay, ret_val);
return ret_val;
}
@@ -1082,14 +1096,15 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
if (skb_queue_empty(&ptr->skb_head)) {
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
- dev_dbg(adapter->dev, "data: nothing to send\n");
+ mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
return;
}
skb = skb_dequeue(&ptr->skb_head);
tx_info = MWIFIEX_SKB_TXCB(skb);
- dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
+ mwifiex_dbg(adapter, DATA,
+ "data: dequeuing the packet %p %p\n", ptr, skb);
ptr->total_pkt_count--;
@@ -1205,7 +1220,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
switch (ret) {
case -EBUSY:
- dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
@@ -1224,7 +1239,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
case -1:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
- dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
+ mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, ret);
break;
@@ -1263,7 +1278,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
tid = mwifiex_get_tid(ptr);
- dev_dbg(adapter->dev, "data: tid=%d\n", tid);
+ mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 95921167b53f..b71fc74d14ab 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5192,7 +5192,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
priv->sniffer_enabled = true;
}
- *total_flags &= FIF_PROMISC_IN_BSS | FIF_ALLMULTI |
+ *total_flags &= FIF_ALLMULTI |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL |
FIF_OTHER_BSS;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 275408eaf95e..257a9eadd595 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -351,8 +351,7 @@ int p54_setup_mac(struct p54_common *priv)
* "TRANSPARENT and PROMISCUOUS are mutually exclusive"
* STSW45X0C LMAC API - page 12
*/
- if (((priv->filter_flags & FIF_PROMISC_IN_BSS) ||
- (priv->filter_flags & FIF_OTHER_BSS)) &&
+ if (priv->filter_flags & FIF_OTHER_BSS &&
(mode != P54_FILTER_TYPE_PROMISCUOUS))
mode |= P54_FILTER_TYPE_TRANSPARENT;
} else {
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c
index 1f6fd5ff5531..9a8fedd3c0f5 100644
--- a/drivers/net/wireless/p54/led.c
+++ b/drivers/net/wireless/p54/led.c
@@ -83,7 +83,7 @@ static void p54_led_brightness_set(struct led_classdev *led_dev,
static int p54_register_led(struct p54_common *priv,
unsigned int led_index,
- char *name, char *trigger)
+ char *name, const char *trigger)
{
struct p54_led_dev *led = &priv->leds[led_index];
int err;
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index e79674f73dc5..2947ad21053c 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -395,13 +395,11 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
{
struct p54_common *priv = dev->priv;
- *total_flags &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
- FIF_OTHER_BSS;
+ *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS;
priv->filter_flags = *total_flags;
- if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+ if (changed_flags & FIF_OTHER_BSS)
p54_setup_mac(priv);
if (changed_flags & FIF_ALLMULTI || multicast)
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 477f86354dc5..0881ba8535f4 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -143,7 +143,7 @@ static int psm;
static char *essid;
/* Default to encapsulation unless translation requested */
-static bool translate = 1;
+static bool translate = true;
static int country = USA;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index d72ff8e7125d..71a825c750cf 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -356,9 +356,9 @@ struct ndis_80211_pmkid {
#define CAP_MODE_80211G 4
#define CAP_MODE_MASK 7
-#define WORK_LINK_UP (1<<0)
-#define WORK_LINK_DOWN (1<<1)
-#define WORK_SET_MULTICAST_LIST (1<<2)
+#define WORK_LINK_UP 0
+#define WORK_LINK_DOWN 1
+#define WORK_SET_MULTICAST_LIST 2
#define RNDIS_WLAN_ALG_NONE 0
#define RNDIS_WLAN_ALG_WEP (1<<0)
@@ -2861,7 +2861,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
deauthenticate(usbdev);
- cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(usbdev->net, 0, NULL, 0, true, GFP_KERNEL);
}
netif_carrier_off(usbdev->net);
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index bdf5590ba304..7da138892026 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -273,10 +273,8 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
!(filter_flags & FIF_CONTROL));
- rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 79f4fe65a119..4ea53aa9ede3 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -274,10 +274,8 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
!(filter_flags & FIF_CONTROL));
- rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 05c64597838d..237bbb54c7a8 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -434,10 +434,8 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
!(filter_flags & FIF_CONTROL));
- rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, 1);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index be2d54f257b1..dfeca8355b22 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1513,8 +1513,7 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
!(filter_flags & FIF_PLCPFAIL));
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
@@ -7818,21 +7817,25 @@ EXPORT_SYMBOL_GPL(rt2800_probe_hw);
/*
* IEEE80211 stack callback functions.
*/
-void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
- u16 *iv16)
+void rt2800_get_key_seq(struct ieee80211_hw *hw,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct mac_iveiv_entry iveiv_entry;
u32 offset;
- offset = MAC_IVEIV_ENTRY(hw_key_idx);
+ if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ return;
+
+ offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
rt2800_register_multiread(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
- memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
- memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
+ memcpy(&seq->tkip.iv16, &iveiv_entry.iv[0], 2);
+ memcpy(&seq->tkip.iv32, &iveiv_entry.iv[4], 4);
}
-EXPORT_SYMBOL_GPL(rt2800_get_tkip_seq);
+EXPORT_SYMBOL_GPL(rt2800_get_key_seq);
int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 3019db637a4b..1609b8a7f7eb 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -209,8 +209,9 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
-void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
- u16 *iv16);
+void rt2800_get_key_seq(struct ieee80211_hw *hw,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq);
int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
int rt2800_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue_idx,
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index cc1b3cc73c5a..0af22573a2eb 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -309,7 +309,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.sw_scan_start = rt2x00mac_sw_scan_start,
.sw_scan_complete = rt2x00mac_sw_scan_complete,
.get_stats = rt2x00mac_get_stats,
- .get_tkip_seq = rt2800_get_tkip_seq,
+ .get_key_seq = rt2800_get_key_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
.sta_add = rt2x00mac_sta_add,
.sta_remove = rt2x00mac_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
index aaa7aa4cad9d..a985a5a7945e 100644
--- a/drivers/net/wireless/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -148,7 +148,7 @@ static const struct ieee80211_ops rt2800soc_mac80211_ops = {
.sw_scan_start = rt2x00mac_sw_scan_start,
.sw_scan_complete = rt2x00mac_sw_scan_complete,
.get_stats = rt2x00mac_get_stats,
- .get_tkip_seq = rt2800_get_tkip_seq,
+ .get_key_seq = rt2800_get_key_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
.sta_add = rt2x00mac_sta_add,
.sta_remove = rt2x00mac_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 6ec2466b52b6..5932306084fd 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -835,7 +835,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.sw_scan_start = rt2x00mac_sw_scan_start,
.sw_scan_complete = rt2x00mac_sw_scan_complete,
.get_stats = rt2x00mac_get_stats,
- .get_tkip_seq = rt2800_get_tkip_seq,
+ .get_key_seq = rt2800_get_key_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
.sta_add = rt2x00mac_sta_add,
.sta_remove = rt2x00mac_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 300876df056f..1b8a459a412b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -359,8 +359,7 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
FIF_PLCPFAIL |
FIF_CONTROL |
FIF_PSPOLL |
- FIF_OTHER_BSS |
- FIF_PROMISC_IN_BSS;
+ FIF_OTHER_BSS;
/*
* Apply some rules to the filters:
@@ -369,9 +368,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
* - Multicast filter seems to kill broadcast traffic so never use it.
*/
*total_flags |= FIF_ALLMULTI;
- if (*total_flags & FIF_OTHER_BSS ||
- *total_flags & FIF_PROMISC_IN_BSS)
- *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
/*
* If the device has a single filter for all control frames,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 819455009fe4..c8a967247a9a 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -530,10 +530,8 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
!(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
- rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a5458cf01fb2..65ce3afb888a 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -480,10 +480,8 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
!(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
- rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 5cf509d346e8..73067cac289c 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -100,7 +100,7 @@ config RTL8821AE
select RTLWIFI_PCI
select RTLBTCOEXIST
---help---
- This is the driver for Realtek RTL8i821AE/RTL8812AE 802.11av PCIe
+ This is the driver for Realtek RTL8821AE/RTL8812AE 802.11ac PCIe
wireless network adapters.
If you choose to build it as a module, it will be called rtl8821ae
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
index cefe26991421..f2b9d11adc9e 100644
--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -1286,8 +1286,11 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
0x12, 0xe1, 0x90);
break;
case 3:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
- 0x3, 0xf1, 0x90);
+ /* This call breaks BT when wireless is active -
+ * comment it out for now until a better fix is found:
+ * btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ * 0x3, 0xf1, 0x90);
+ */
break;
case 4:
btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 82733c6b8c46..782ac2fc4b28 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -27,8 +27,7 @@
#define __RTL_CORE_H__
#define RTL_SUPPORTED_FILTERS \
- (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | FIF_CONTROL | \
+ (FIF_ALLMULTI | FIF_CONTROL | \
FIF_OTHER_BSS | \
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC)
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index 1893d01b9e78..a62bf0a65c32 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -40,6 +40,7 @@ static struct country_code_to_enum_rd allCountries[] = {
{COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
{COUNTRY_CODE_WORLD_WIDE_13, "EC"},
{COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+ {COUNTRY_CODE_WORLD_WIDE_13_5G_ALL, "US"},
};
/*
@@ -124,6 +125,17 @@ static const struct ieee80211_regdomain rtl_regdom_14_60_64 = {
}
};
+static const struct ieee80211_regdomain rtl_regdom_12_13_5g_all = {
+ .n_reg_rules = 4,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_5GHZ_5150_5350,
+ RTL819x_5GHZ_5470_5850,
+ }
+};
+
static const struct ieee80211_regdomain rtl_regdom_14 = {
.n_reg_rules = 3,
.alpha2 = "99",
@@ -348,6 +360,8 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_14_60_64;
case COUNTRY_CODE_GLOBAL_DOMAIN:
return &rtl_regdom_14;
+ case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
+ return &rtl_regdom_12_13_5g_all;
default:
return &rtl_regdom_no_midband;
}
@@ -384,6 +398,25 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
return NULL;
}
+static u8 channel_plan_to_country_code(u8 channelplan)
+{
+ switch (channelplan) {
+ case 0x20:
+ case 0x21:
+ return COUNTRY_CODE_WORLD_WIDE_13;
+ case 0x22:
+ return COUNTRY_CODE_IC;
+ case 0x32:
+ return COUNTRY_CODE_TELEC_NETGEAR;
+ case 0x41:
+ return COUNTRY_CODE_GLOBAL_DOMAIN;
+ case 0x7f:
+ return COUNTRY_CODE_WORLD_WIDE_13_5G_ALL;
+ default:
+ return COUNTRY_CODE_MAX; /*Error*/
+ }
+}
+
int rtl_regd_init(struct ieee80211_hw *hw,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
@@ -396,11 +429,12 @@ int rtl_regd_init(struct ieee80211_hw *hw,
return -EINVAL;
/* init country_code from efuse channel plan */
- rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
+ rtlpriv->regd.country_code =
+ channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- "rtl: EEPROM regdomain: 0x%0x\n",
- rtlpriv->regd.country_code);
+ RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM regdomain: 0x%0x conuntry code: %d\n",
+ rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
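[Editor's note: a hedged usage sketch of the translation added above. The raw efuse channel plan byte is mapped to a country_code_type_t value, and unknown plans surface as COUNTRY_CODE_MAX so the caller can fall back; the fallback value below is an assumption for illustration only.]

	u8 code = channel_plan_to_country_code(rtlpriv->efuse.channel_plan);

	if (code >= COUNTRY_CODE_MAX)
		code = COUNTRY_CODE_WORLD_WIDE_13;	/* assumed safe default */
	rtlpriv->regd.country_code = code;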
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h
index 3bbbaaa68530..f7f15bce35dd 100644
--- a/drivers/net/wireless/rtlwifi/regd.h
+++ b/drivers/net/wireless/rtlwifi/regd.h
@@ -49,6 +49,7 @@ enum country_code_type_t {
COUNTRY_CODE_GLOBAL_DOMAIN = 10,
COUNTRY_CODE_WORLD_WIDE_13 = 11,
COUNTRY_CODE_TELEC_NETGEAR = 12,
+ COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,
/*add new channel plan above this line */
COUNTRY_CODE_MAX
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index 86ce5b1930e6..8ee83b093c0d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -1354,27 +1354,11 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
-
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HSISR);
- rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl88ee_clear_interrupt(hw);/*clear it here first*/
rtl_write_dword(rtlpriv, REG_HIMR,
rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE,
@@ -1919,8 +1903,8 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
"dev_addr: %pM\n", rtlefuse->dev_addr);
/*channel plan */
rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
- /* set channel paln to world wide 13 */
- rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+ /* set channel plan from efuse */
+ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
_rtl88ee_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag,
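[Editor's note: for reference, the deleted helper implemented the common write-1-to-clear idiom, where reading an interrupt status register and writing the value straight back acknowledges every pending bit. The patch drops that acknowledgment from the interrupt enable path; a hedged sketch of the idiom itself, using the same rtlwifi accessors:]

	u32 tmp;

	tmp = rtl_read_dword(rtlpriv, REG_HISR);
	rtl_write_dword(rtlpriv, REG_HISR, tmp);	/* ack pending bits */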
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
index ef28c8ea1e84..02013df968a0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
@@ -23,7 +23,7 @@
*
*****************************************************************************/
-#include "pwrseqcmd.h"
+#include "../pwrseqcmd.h"
#include "pwrseq.h"
/* drivers should parse below arrays and do the corresponding actions */
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
index 79103347d967..f2d9c6116e5c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
@@ -26,7 +26,7 @@
#ifndef __RTL8723E_PWRSEQ_H__
#define __RTL8723E_PWRSEQ_H__
-#include "pwrseqcmd.h"
+#include "../pwrseqcmd.h"
/* Check document WM-20110607-Paul-RTL8188EE_Power_Architecture-R02.vsd
* There are 6 HW Power States:
* 0: POFF--Power Off
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index d310d55d800e..189859617db8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -889,7 +889,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
rtl92c_init_beacon_parameters(hw, rtlhal->version);
rtl92c_init_ampdu_aggregation(hw);
- rtl92c_init_beacon_max_error(hw, true);
+ rtl92c_init_beacon_max_error(hw);
return err;
}
@@ -1323,7 +1323,6 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
bt_msr &= 0xfc;
- rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
NL80211_IFTYPE_STATION) {
_rtl92cu_stop_tx_beacon(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index adb810794eef..f3db6bc8596a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -613,7 +613,7 @@ void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, 0x4CA, 0x0708);
}
-void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
index bf53652e4edd..58548e8f2c41 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -66,7 +66,7 @@ void rtl92c_init_edca_param(struct ieee80211_hw *hw,
void rtl92c_init_edca(struct ieee80211_hw *hw);
void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
-void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw);
void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
void rtl92c_init_retry_function(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
index c5d4b8013cde..232865cc3ffd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
@@ -875,7 +875,7 @@ static void _rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
break;
default:
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
+ "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
index da0a6125f314..5f14308e8eb3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
@@ -1584,28 +1584,11 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
-
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HSISR);
- rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
void rtl92ee_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl92ee_clear_interrupt(hw);/*clear it here first*/
-
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
rtlpci->irq_enabled = true;
@@ -2194,8 +2177,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
"dev_addr: %pM\n", rtlefuse->dev_addr);
/*channel plan */
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
- /* set channel paln to world wide 13 */
- rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+ /* set channel plan from efuse */
+ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
_rtl92ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
hwinfo);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 67bb47d77b68..a4b7eac6856f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -1258,18 +1258,6 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
-
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-}
-
void rtl8723e_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1284,7 +1272,6 @@ void rtl8723e_disable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl8723e_clear_interrupt(hw);/*clear it here first*/
rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED);
rtlpci->irq_enabled = false;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
index 69d4f0fc1af1..d5da0f3c1217 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
@@ -613,7 +613,7 @@ static void _rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
+ "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
index b681af3c7a35..c983d2fe147f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -1634,28 +1634,11 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
-
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HSISR);
- rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl8723be_clear_interrupt(hw);/*clear it here first*/
-
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
rtlpci->irq_enabled = true;
@@ -2139,8 +2122,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
- /* set channel plan to world wide 13 */
- rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+ /* set channel plan from efuse */
+ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
if (rtlhal->oem_id == RT_CID_DEFAULT) {
/* Does this one have a Toshiba SMID from group 1? */
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 8704eee9f3a4..3236d44b459d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -2253,31 +2253,11 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- /*printk("clear interrupt first:\n");
- printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- /*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HSISR);
- /*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/
- rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl8821ae_clear_interrupt(hw);/*clear it here first*/
-
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
rtlpci->irq_enabled = true;
@@ -3232,8 +3212,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
if (rtlefuse->eeprom_channelplan == 0xff)
rtlefuse->eeprom_channelplan = 0x7F;
- /* set channel paln to world wide 13 */
- /* rtlefuse->channel_plan = (u8)rtlefuse->eeprom_channelplan; */
+ /* set channel plan from efuse */
+ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*parse xtal*/
rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 5d54d16a59e7..f238ee54226c 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -763,8 +763,7 @@ static u64 wl1251_op_prepare_multicast(struct ieee80211_hw *hw,
return (u64)(unsigned long)fp;
}
-#define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+#define WL1251_SUPPORTED_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_CONTROL | \
@@ -795,10 +794,6 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
- if (*total & FIF_PROMISC_IN_BSS) {
- wl->rx_config |= CFG_BSSID_FILTER_EN;
- wl->rx_config |= CFG_RX_ALL_GOOD;
- }
if (*total & FIF_ALLMULTI)
/*
* CFG_MC_FILTER_EN in rx_config needs to be 0 to receive
@@ -825,7 +820,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (*total & FIF_ALLMULTI || *total & FIF_PROMISC_IN_BSS)
+ if (*total & FIF_ALLMULTI)
ret = wl1251_acx_group_address_tbl(wl, false, NULL, 0);
else if (fp)
ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 717c4f5a02c2..49aca2cf7605 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -24,6 +24,7 @@
#include <linux/ip.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
+#include <linux/irq.h>
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
@@ -578,7 +579,7 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
[PART_TOP_PRCM_ELP_SOC] = {
- .mem = { .start = 0x00A02000, .size = 0x00010000 },
+ .mem = { .start = 0x00A00000, .size = 0x00012000 },
.reg = { .start = 0x00807000, .size = 0x00005000 },
.mem2 = { .start = 0x00800000, .size = 0x0000B000 },
.mem3 = { .start = 0x00000000, .size = 0x00000000 },
@@ -862,6 +863,7 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
{
u32 tmp;
int ret;
+ u16 irq_invert;
BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) >
WL18XX_PHY_INIT_MEM_SIZE);
@@ -911,6 +913,28 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
/* re-enable FDSP clock */
ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
MEM_FDSP_CLK_120_ENABLE);
+ if (ret < 0)
+ goto out;
+
+ ret = irq_get_trigger_type(wl->irq);
+ if ((ret == IRQ_TYPE_LEVEL_LOW) || (ret == IRQ_TYPE_EDGE_FALLING)) {
+ wl1271_info("using inverted interrupt logic: %d", ret);
+ ret = wlcore_set_partition(wl,
+ &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_top_reg_read(wl, TOP_FN0_CCCR_REG_32, &irq_invert);
+ if (ret < 0)
+ goto out;
+
+ irq_invert |= BIT(1);
+ ret = wl18xx_top_reg_write(wl, TOP_FN0_CCCR_REG_32, irq_invert);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
+ }
out:
return ret;
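[Editor's note: a hedged sketch of the polarity fix added above. When the platform describes the WLAN interrupt as falling-edge or level-low, the chip's IRQ output is inverted by setting a bit in a top-level chip register; the partition switching that brackets the register access in the real code, and its error handling, are omitted here for brevity.]

	unsigned int type = irq_get_trigger_type(wl->irq);

	if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
		u16 val;

		wl18xx_top_reg_read(wl, TOP_FN0_CCCR_REG_32, &val);
		val |= BIT(1);		/* invert IRQ polarity */
		wl18xx_top_reg_write(wl, TOP_FN0_CCCR_REG_32, val);
	}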
@@ -1351,9 +1375,10 @@ out:
}
#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
-static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+
+static int wl18xx_load_conf_file(struct device *dev, struct wlcore_conf *conf,
+ struct wl18xx_priv_conf *priv_conf)
{
- struct wl18xx_priv *priv = wl->priv;
struct wlcore_conf_file *conf_file;
const struct firmware *fw;
int ret;
@@ -1362,14 +1387,14 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
if (ret < 0) {
wl1271_error("could not get configuration binary %s: %d",
WL18XX_CONF_FILE_NAME, ret);
- goto out_fallback;
+ return ret;
}
if (fw->size != WL18XX_CONF_SIZE) {
wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
WL18XX_CONF_SIZE, fw->size);
ret = -EINVAL;
- goto out;
+ goto out_release;
}
conf_file = (struct wlcore_conf_file *) fw->data;
@@ -1379,7 +1404,7 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
"expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
conf_file->header.magic);
ret = -EINVAL;
- goto out;
+ goto out_release;
}
if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
@@ -1387,28 +1412,32 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
"expected 0x%08x got 0x%08x",
WL18XX_CONF_VERSION, conf_file->header.version);
ret = -EINVAL;
- goto out;
+ goto out_release;
}
- memcpy(&wl->conf, &conf_file->core, sizeof(wl18xx_conf));
- memcpy(&priv->conf, &conf_file->priv, sizeof(priv->conf));
+ memcpy(conf, &conf_file->core, sizeof(*conf));
+ memcpy(priv_conf, &conf_file->priv, sizeof(*priv_conf));
+
+out_release:
+ release_firmware(fw);
+ return ret;
+}
- goto out;
+static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+{
+ struct wl18xx_priv *priv = wl->priv;
-out_fallback:
- wl1271_warning("falling back to default config");
+ if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf) < 0) {
+ wl1271_warning("falling back to default config");
- /* apply driver default configuration */
- memcpy(&wl->conf, &wl18xx_conf, sizeof(wl18xx_conf));
- /* apply default private configuration */
- memcpy(&priv->conf, &wl18xx_default_priv_conf, sizeof(priv->conf));
+ /* apply driver default configuration */
+ memcpy(&wl->conf, &wl18xx_conf, sizeof(wl->conf));
+ /* apply default private configuration */
+ memcpy(&priv->conf, &wl18xx_default_priv_conf,
+ sizeof(priv->conf));
+ }
- /* For now we just fallback */
return 0;
-
-out:
- release_firmware(fw);
- return ret;
}
static int wl18xx_plt_init(struct wl1271 *wl)
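[Editor's note: the refactor above isolates the request_firmware() pattern into its own helper so the blob is released on every exit path instead of leaking on the fallback path. A hedged, condensed sketch of that pattern; the real helper also validates a magic and version header before copying:]

	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, WL18XX_CONF_FILE_NAME, dev);
	if (ret < 0)
		return ret;		/* caller falls back to defaults */

	if (fw->size != WL18XX_CONF_SIZE)
		ret = -EINVAL;
	else
		memcpy(conf, fw->data, sizeof(*conf));	/* illustrative copy */

	release_firmware(fw);
	return ret;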
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index a433a75f3cd7..bac2364c8e72 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -109,6 +109,7 @@
#define WL18XX_WELP_ARM_COMMAND (WL18XX_REGISTERS_BASE + 0x7100)
#define WL18XX_ENABLE (WL18XX_REGISTERS_BASE + 0x01543C)
+#define TOP_FN0_CCCR_REG_32 (WL18XX_TOP_OCP_BASE + 0x64)
/* PRCM registers */
#define PLATFORM_DETECTION 0xA0E3E0
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 0be807951afe..ef3fe0fff588 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3175,8 +3175,7 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
return (u64)(unsigned long)fp;
}
-#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_CONTROL | \
@@ -5966,10 +5965,6 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
{
int ret;
- ret = wl12xx_set_power_on(wl);
- if (ret < 0)
- return ret;
-
ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
if (ret < 0)
goto out;
@@ -5985,7 +5980,6 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
ret = wl->ops->get_mac(wl);
out:
- wl1271_power_off(wl);
return ret;
}
@@ -6077,7 +6071,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_CHANCTX_STA_CSA;
+ IEEE80211_HW_CHANCTX_STA_CSA |
+ IEEE80211_HW_SUPPORT_FAST_XMIT;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -6432,10 +6427,22 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
else
wl->irq_flags |= IRQF_ONESHOT;
+ ret = wl12xx_set_power_on(wl);
+ if (ret < 0)
+ goto out_free_nvs;
+
+ ret = wl12xx_get_hw_info(wl);
+ if (ret < 0) {
+ wl1271_error("couldn't get hw info");
+ wl1271_power_off(wl);
+ goto out_free_nvs;
+ }
+
ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
wl->irq_flags, pdev->name, wl);
if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
+ wl1271_error("interrupt configuration failed");
+ wl1271_power_off(wl);
goto out_free_nvs;
}
@@ -6449,12 +6456,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
}
#endif
disable_irq(wl->irq);
-
- ret = wl12xx_get_hw_info(wl);
- if (ret < 0) {
- wl1271_error("couldn't get hw info");
- goto out_irq;
- }
+ wl1271_power_off(wl);
ret = wl->ops->identify_chip(wl);
if (ret < 0)
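[Editor's note: a hedged sketch of the probe ordering this series of hunks establishes. The chip is powered on and identified before the interrupt line is requested, so the handler can never fire against hardware whose ID is still unknown; the goto labels here are illustrative, not the driver's:]

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_get_hw_info(wl);	/* reads REG_CHIP_ID_B */
	if (ret < 0)
		goto out_power_off;

	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
				   wl->irq_flags, pdev->name, wl);
	if (ret < 0)
		goto out_power_off;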
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index e7af261e9198..89b6f69f09c8 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1230,7 +1230,7 @@ static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
}
#define SUPPORTED_FIF_FLAGS \
- (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
+ (FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)
static void zd_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
@@ -1256,7 +1256,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
* we will have some issue with IPv6 which uses multicast for link
* layer address resolution.
*/
- if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
+ if (*new_flags & FIF_ALLMULTI)
zd_mc_add_all(&hash);
spin_lock_irqsave(&mac->lock, flags);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0d2594395ffb..f1b2c1721917 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -52,7 +52,7 @@
* event channels are limited resource. Split event channels are
* enabled by default.
*/
-bool separate_tx_rx_irq = 1;
+bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);
/* The time that packets can stay on the guest Rx internal queue
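[Editor's note: a one-character fix, but worth spelling out the rule behind it. module_param(..., bool, ...) expects the backing variable to actually have type bool, and modern kernels enforce this at build time, so the initializer is written as true rather than 1. A minimal sketch with an illustrative parameter name:]

	#include <linux/module.h>

	static bool example_flag = true;
	module_param(example_flag, bool, 0644);
	MODULE_PARM_DESC(example_flag, "illustrative boolean parameter");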
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e031c943286e..c89ca26e254d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1560,9 +1560,8 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
- init_timer(&queue->rx_refill_timer);
- queue->rx_refill_timer.data = (unsigned long)queue;
- queue->rx_refill_timer.function = rx_refill_timeout;
+ setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
+ (unsigned long)queue);
snprintf(queue->name, sizeof(queue->name), "%s-q%u",
queue->info->netdev->name, queue->id);
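[Editor's note: the hunk above is a mechanical API conversion. A hedged before/after sketch of the two equivalent forms, assuming a struct timer_list t, a callback with the void (*)(unsigned long) signature of that era, and an opaque ctx pointer:]

	/* open-coded initialization */
	init_timer(&t);
	t.data = (unsigned long)ctx;
	t.function = callback;

	/* equivalent single call */
	setup_timer(&t, callback, (unsigned long)ctx);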